diff --git a/BUILD b/BUILD index 60c67d75fd73..7410e42bff58 100644 --- a/BUILD +++ b/BUILD @@ -43,6 +43,14 @@ filegroup( "//jenkins:all-srcs", "//jobs:all-srcs", "//kettle:all-srcs", + "//kubernetes/cluster:all-srcs", + "//kubernetes/docs:all-srcs", + "//kubernetes/examples:all-srcs", + "//kubernetes/hack:all-srcs", + "//kubernetes/test/e2e/testing-manifests:all-srcs", + "//kubernetes/test/images:all-srcs", + "//kubernetes/test/kubemark:all-srcs", + "//kubernetes/third_party/htpasswd:all-srcs", "//kubetest:all-srcs", "//label_sync:all-srcs", "//logexporter/cmd:all-srcs", @@ -51,6 +59,7 @@ filegroup( "//maintenance/migratestatus:all-srcs", "//metrics:all-srcs", "//mungegithub:all-srcs", + "//perfdash:all-srcs", "//prow:all-srcs", "//queue_health/graph:all-srcs", "//robots/commenter:all-srcs", diff --git a/Gopkg.lock b/Gopkg.lock index 2182a6e060ef..2482aa2f3027 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,6 +1,12 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. +[[projects]] + branch = "master" + name = "bitbucket.org/bertimus9/systemstat" + packages = ["."] + revision = "6edb7bbcb021f6510db33e604f7e18861293a14a" + [[projects]] name = "bitbucket.org/ww/goautoneg" packages = ["."] @@ -113,6 +119,105 @@ packages = ["."] revision = "02cc386c183a1bf89dcac8c8008bb465f69d3340" +[[projects]] + name = "github.com/codegangsta/negroni" + packages = ["."] + revision = "5dbbc83f748fc3ad38585842b0aedab546d0ea1e" + version = "v0.3.0" + +[[projects]] + name = "github.com/coreos/bbolt" + packages = ["."] + revision = "583e8937c61f1af6513608ccc75c97b6abdf4ff9" + version = "v1.3.0" + +[[projects]] + name = "github.com/coreos/etcd" + packages = [ + "alarm", + "auth", + "auth/authpb", + "client", + "clientv3", + "compactor", + "discovery", + "error", + "etcdserver", + "etcdserver/api", + "etcdserver/api/v2http/httptypes", + "etcdserver/api/v3rpc/rpctypes", + "etcdserver/etcdserverpb", + "etcdserver/membership", + "etcdserver/stats", + "lease", + "lease/leasehttp", + "lease/leasepb", + "mvcc", + "mvcc/backend", + "mvcc/mvccpb", + "pkg/adt", + "pkg/contention", + "pkg/cpuutil", + "pkg/crc", + "pkg/fileutil", + "pkg/httputil", + "pkg/idutil", + "pkg/ioutil", + "pkg/logutil", + "pkg/monotime", + "pkg/netutil", + "pkg/pathutil", + "pkg/pbutil", + "pkg/runtime", + "pkg/schedule", + "pkg/srv", + "pkg/tlsutil", + "pkg/transport", + "pkg/types", + "pkg/wait", + "raft", + "raft/raftpb", + "rafthttp", + "snap", + "snap/snappb", + "store", + "version", + "wal", + "wal/walpb" + ] + revision = "1b3ac99e8a431b381e633802cc42fe70e663baf5" + version = "v3.2.15" + +[[projects]] + name = "github.com/coreos/go-semver" + packages = ["semver"] + revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6" + version = "v0.2.0" + +[[projects]] + name = "github.com/coreos/go-systemd" + packages = ["journal"] + revision = "d2196463941895ee908e13531a23a39feb9e1243" + version = "v15" + +[[projects]] + name = "github.com/coreos/pkg" + packages = ["capnslog"] + revision = "3ac0863d7acf3bc44daf49afef8919af12f704ef" + version = "v3" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29" + version = "v3.1.0" + [[projects]] name = "github.com/docker/distribution" packages = [ @@ -160,6 +265,12 @@ revision = 
"0dadbb0345b35ec7ef35e228dabb8de89a65bf52" version = "v0.3.2" +[[projects]] + name = "github.com/elazarl/goproxy" + packages = ["."] + revision = "0a5b8bdb09049ae4ae68e75437ded0b7a2e324e3" + version = "v1.0" + [[projects]] name = "github.com/emicklei/go-restful" packages = [ @@ -169,6 +280,21 @@ revision = "2dd44038f0b95ae693b266c5f87593b5d2fdd78d" version = "v2.5.0" +[[projects]] + branch = "master" + name = "github.com/evanphx/json-patch" + packages = ["."] + revision = "944e07253867aacae43c04b2e6a239005443f33a" + +[[projects]] + name = "github.com/garyburd/redigo" + packages = [ + "internal", + "redis" + ] + revision = "d1ed5c67e5794de818ea85e6b522fda02623a484" + version = "v1.4.0" + [[projects]] name = "github.com/ghodss/yaml" packages = ["."] @@ -303,6 +429,28 @@ revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f" version = "v2.0.0" +[[projects]] + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions" + ] + revision = "ee43cbb60db7bd22502942cccbc39059117352ab" + version = "v0.1.0" + +[[projects]] + name = "github.com/gorilla/context" + packages = ["."] + revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a" + version = "v1.1" + +[[projects]] + name = "github.com/gorilla/mux" + packages = ["."] + revision = "53c1911da2b537f792e7cafcb446b05ffe33b996" + version = "v1.6.1" + [[projects]] name = "github.com/gregjones/httpcache" packages = [ @@ -311,6 +459,18 @@ ] revision = "16db777d8ebea60a3e746a7e0703eaaa53f85f7f" +[[projects]] + branch = "master" + name = "github.com/howeyc/gopass" + packages = ["."] + revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8" + +[[projects]] + name = "github.com/imdario/mergo" + packages = ["."] + revision = "163f41321a19dd09362d4c63cc2489db2015f1f4" + version = "0.3.2" + [[projects]] name = "github.com/inconshreveable/mousetrap" packages = ["."] @@ -351,6 +511,24 @@ packages = ["."] revision = "0b12d6b5" +[[projects]] + name = "github.com/jonboulle/clockwork" + packages = ["."] + revision = "2eee05ed794112d45db504eb05aa693efd2b8b09" + version = "v0.1.0" + +[[projects]] + name = "github.com/json-iterator/go" + packages = ["."] + revision = "28452fcdec4e44348d2af0d91d1e9e38da3a9e0a" + version = "1.0.5" + +[[projects]] + name = "github.com/juju/ratelimit" + packages = ["."] + revision = "59fac5042749a5afb9af70e813da1dd5474f0167" + version = "1.0.1" + [[projects]] branch = "master" name = "github.com/mailru/easyjson" @@ -488,13 +666,40 @@ packages = ["."] revision = "7aff26db30c1be810f9de5038ec5ef96ac41fd7c" +[[projects]] + name = "github.com/ugorji/go" + packages = ["codec"] + revision = "9831f2c3ac1068a78f50999a30db84270f647af6" + version = "v1.1" + +[[projects]] + name = "github.com/xiang90/probing" + packages = ["."] + revision = "07dd2e8dfe18522e9c447ba95f2fe95262f63bb2" + version = "0.0.1" + +[[projects]] + name = "github.com/xyproto/pinterface" + packages = ["."] + revision = "05fa0e3080664c781c213d0a890a9d22718f7afb" + version = "4.1" + +[[projects]] + name = "github.com/xyproto/simpleredis" + packages = ["."] + revision = "a42917321d55fd8b3a1a08affb5fc49fa43c7f8c" + version = "2.3" + [[projects]] name = "golang.org/x/crypto" packages = [ + "bcrypt", + "blowfish", "curve25519", "ed25519", "ed25519/internal/edwards25519", - "ssh" + "ssh", + "ssh/terminal" ] revision = "81e90905daefcd6fd217b62423c0908922eadb30" @@ -626,6 +831,7 @@ "encoding", 
"grpclb/grpc_lb_v1/messages", "grpclog", + "health/grpc_health_v1", "internal", "keepalive", "metadata", @@ -662,34 +868,87 @@ [[projects]] branch = "master" name = "k8s.io/api" - packages = ["core/v1"] + packages = [ + "admission/v1beta1", + "admissionregistration/v1alpha1", + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "core/v1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1alpha1", + "settings/v1alpha1", + "storage/v1", + "storage/v1alpha1", + "storage/v1beta1" + ] revision = "fbe336854453ac8e27bffe14e1964555245cbd05" +[[projects]] + branch = "master" + name = "k8s.io/apiextensions-apiserver" + packages = ["pkg/features"] + revision = "ed43f04e5359fe96ddec30cceeb7053be9300bd1" + [[projects]] branch = "release-1.9" name = "k8s.io/apimachinery" packages = [ + "pkg/api/errors", + "pkg/api/meta", "pkg/api/resource", + "pkg/apimachinery", + "pkg/apimachinery/announced", + "pkg/apimachinery/registered", + "pkg/apis/meta/internalversion", "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1alpha1", "pkg/conversion", "pkg/conversion/queryparams", "pkg/fields", "pkg/labels", "pkg/runtime", "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", "pkg/selection", "pkg/types", "pkg/util/clock", "pkg/util/errors", + "pkg/util/framer", "pkg/util/intstr", "pkg/util/json", "pkg/util/net", + "pkg/util/rand", "pkg/util/runtime", "pkg/util/sets", "pkg/util/validation", "pkg/util/validation/field", "pkg/util/wait", "pkg/util/yaml", + "pkg/version", "pkg/watch", "third_party/forked/golang/reflect" ] @@ -698,9 +957,67 @@ [[projects]] branch = "release-1.9" name = "k8s.io/apiserver" - packages = ["pkg/util/flag"] + packages = [ + "pkg/features", + "pkg/util/feature", + "pkg/util/flag", + "pkg/util/logs" + ] revision = "91e14f394e4796abf5a994a349a222e7081d86b6" +[[projects]] + name = "k8s.io/client-go" + packages = [ + "discovery", + "kubernetes", + "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1alpha1", + "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/core/v1", + "kubernetes/typed/events/v1beta1", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/networking/v1", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1beta1", + "pkg/version", + "rest", + 
"rest/watch", + "tools/auth", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/metrics", + "tools/reference", + "transport", + "util/cert", + "util/flowcontrol", + "util/homedir", + "util/integer" + ] + revision = "78700dec6369ba22221b72770783300f143df150" + version = "v6.0.0" + [[projects]] name = "k8s.io/contrib" packages = ["test-utils/utils"] @@ -712,9 +1029,110 @@ packages = ["pkg/common"] revision = "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3" +[[projects]] + name = "k8s.io/kubernetes" + packages = [ + "pkg/api/legacyscheme", + "pkg/api/ref", + "pkg/apis/admissionregistration", + "pkg/apis/admissionregistration/install", + "pkg/apis/admissionregistration/v1alpha1", + "pkg/apis/admissionregistration/v1beta1", + "pkg/apis/apps", + "pkg/apis/apps/install", + "pkg/apis/apps/v1", + "pkg/apis/apps/v1beta1", + "pkg/apis/apps/v1beta2", + "pkg/apis/authentication", + "pkg/apis/authentication/install", + "pkg/apis/authentication/v1", + "pkg/apis/authentication/v1beta1", + "pkg/apis/authorization", + "pkg/apis/authorization/install", + "pkg/apis/authorization/v1", + "pkg/apis/authorization/v1beta1", + "pkg/apis/autoscaling", + "pkg/apis/autoscaling/install", + "pkg/apis/autoscaling/v1", + "pkg/apis/autoscaling/v2beta1", + "pkg/apis/batch", + "pkg/apis/batch/install", + "pkg/apis/batch/v1", + "pkg/apis/batch/v1beta1", + "pkg/apis/batch/v2alpha1", + "pkg/apis/certificates", + "pkg/apis/certificates/install", + "pkg/apis/certificates/v1beta1", + "pkg/apis/componentconfig", + "pkg/apis/componentconfig/install", + "pkg/apis/componentconfig/v1alpha1", + "pkg/apis/core", + "pkg/apis/core/install", + "pkg/apis/core/v1", + "pkg/apis/events", + "pkg/apis/events/install", + "pkg/apis/events/v1beta1", + "pkg/apis/extensions", + "pkg/apis/extensions/install", + "pkg/apis/extensions/v1beta1", + "pkg/apis/networking", + "pkg/apis/networking/install", + "pkg/apis/networking/v1", + "pkg/apis/policy", + "pkg/apis/policy/install", + "pkg/apis/policy/v1beta1", + "pkg/apis/rbac", + "pkg/apis/rbac/install", + "pkg/apis/rbac/v1", + "pkg/apis/rbac/v1alpha1", + "pkg/apis/rbac/v1beta1", + "pkg/apis/scheduling", + "pkg/apis/scheduling/install", + "pkg/apis/scheduling/v1alpha1", + "pkg/apis/settings", + "pkg/apis/settings/install", + "pkg/apis/settings/v1alpha1", + "pkg/apis/storage", + "pkg/apis/storage/install", + "pkg/apis/storage/v1", + "pkg/apis/storage/v1alpha1", + "pkg/apis/storage/v1beta1", + "pkg/client/clientset_generated/internalclientset", + "pkg/client/clientset_generated/internalclientset/scheme", + "pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/core/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/events/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", + 
"pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", + "pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", + "pkg/features", + "pkg/kubelet/apis", + "pkg/master/ports", + "pkg/util/parsers", + "pkg/util/pointer", + "test/e2e/perftype", + "test/images/net/common", + "test/images/net/nat", + "test/images/resource-consumer/common", + "third_party/forked/etcd221/pkg/fileutil", + "third_party/forked/etcd221/wal" + ] + revision = "5fa2db2bd46ac79e5e00a4e6ed24191080aa463b" + version = "v1.9.2" + [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "63d5f8d9680977e4c44ccd74e0395506ba6937a7e663eed3226ae3e3852fcda5" + inputs-digest = "5279d6466fc56d138f8480958c168f63d7917e5b98ea7a0fda73fb68ca6e252f" solver-name = "gps-cdcl" solver-version = 1 diff --git a/perfdash/.gitignore b/perfdash/.gitignore new file mode 100644 index 000000000000..3d19dd53f272 --- /dev/null +++ b/perfdash/.gitignore @@ -0,0 +1,2 @@ +/.idea/ +/perfdash diff --git a/perfdash/BUILD b/perfdash/BUILD new file mode 100644 index 000000000000..4d158693c35c --- /dev/null +++ b/perfdash/BUILD @@ -0,0 +1,46 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "config.go", + "downloader.go", + "google-gcs-downloader.go", + "parser.go", + "perfdash.go", + "pod_names.go", + ], + importpath = "k8s.io/test-infra/perfdash", + visibility = ["//visibility:private"], + deps = [ + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/k8s.io/contrib/test-utils/utils:go_default_library", + "//vendor/k8s.io/kubernetes/test/e2e/perftype:go_default_library", + ], +) + +go_binary( + name = "perfdash", + embed = [":go_default_library"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["pod_names_test.go"], + embed = [":go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/perfdash/Dockerfile b/perfdash/Dockerfile new file mode 100644 index 000000000000..16408a0b8856 --- /dev/null +++ b/perfdash/Dockerfile @@ -0,0 +1,21 @@ +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+FROM google/debian:jessie
+MAINTAINER Brendan Burns
+RUN apt-get update
+RUN apt-get install -y -qq ca-certificates
+ADD perfdash /perfdash
+ADD www /www
+
diff --git a/perfdash/Makefile b/perfdash/Makefile
new file mode 100644
index 000000000000..0437b394ee17
--- /dev/null
+++ b/perfdash/Makefile
@@ -0,0 +1,20 @@
+all: push
+
+# See deployment.yaml for the version currently running -- bump this ahead before rebuilding!
+TAG = 0.11
+
+PROJ = google_containers
+
+.PHONY: perfdash
+perfdash:
+	cd .. && bazel build perfdash
+	cp ../bazel-bin/perfdash/linux_amd64_stripped/perfdash .
+
+container: perfdash
+	docker build --pull -t gcr.io/$(PROJ)/perfdash:$(TAG) .
+
+push: container
+	gcloud docker -- push gcr.io/$(PROJ)/perfdash:$(TAG)
+
+clean:
+	rm -f perfdash
diff --git a/perfdash/OWNERS b/perfdash/OWNERS
new file mode 100644
index 000000000000..4303332ee373
--- /dev/null
+++ b/perfdash/OWNERS
@@ -0,0 +1,10 @@
+approvers:
+- Random-Liu
+- gmarek
+- porridge
+- shyamjvs
+reviewers:
+- Random-Liu
+- gmarek
+- porridge
+- shyamjvs
diff --git a/perfdash/config.go b/perfdash/config.go
new file mode 100644
index 000000000000..9c75d79d6f7b
--- /dev/null
+++ b/perfdash/config.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+
+	"github.com/ghodss/yaml"
+	"k8s.io/contrib/test-utils/utils"
+)
+
+// To add support for a new e2e test, you need to:
+// 1) Transform the e2e performance test result into *PerfData* in k8s/kubernetes/test/e2e/perftype,
+//    and print the PerfData in the e2e test log.
+// 2) Add the corresponding bucket, job and test into *TestConfig*.
+
+// TestDescription contains test name, output file prefix and parser function.
+type TestDescription struct {
+	Name             string
+	OutputFilePrefix string
+	Parser           func(data []byte, buildNumber int, testResult *BuildData)
+}
+
+// Tests is a map from test label to test description.
+type Tests struct {
+	Prefix       string
+	Descriptions map[string]TestDescription
+}
+
+// Jobs is a map from job name to all supported tests in the job.
+type Jobs map[string]Tests
+
+// Buckets is a map from bucket URL to all supported jobs in the bucket.
+type Buckets map[string]Jobs
+
+var (
+	// performanceDescriptions contains metrics exported by a --ginkgo.focus=[Feature:Performance]
+	// e2e test
+	performanceDescriptions = map[string]TestDescription{
+		"DensityResponsiveness": {
+			Name:             "density",
+			OutputFilePrefix: "APIResponsiveness",
+			Parser:           parseResponsivenessData,
+		},
+		"DensityResources": {
+			Name:             "density",
+			OutputFilePrefix: "ResourceUsageSummary",
+			Parser:           parseResourceUsageData,
+		},
+		"DensityPodStartup": {
+			Name:             "density",
+			OutputFilePrefix: "PodStartupLatency",
+			Parser:           parseResponsivenessData,
+		},
+		"DensityTestPhaseTimer": {
+			Name:             "density",
+			OutputFilePrefix: "TestPhaseTimer",
+			Parser:           parseResponsivenessData,
+		},
+		"LoadResponsiveness": {
+			Name:             "load",
+			OutputFilePrefix: "APIResponsiveness",
+			Parser:           parseResponsivenessData,
+		},
+		"LoadResources": {
+			Name:             "load",
+			OutputFilePrefix: "ResourceUsageSummary",
+			Parser:           parseResourceUsageData,
+		},
+		"LoadTestPhaseTimer": {
+			Name:             "load",
+			OutputFilePrefix: "TestPhaseTimer",
+			Parser:           parseResponsivenessData,
+		},
+	}
+
+	// TestConfig contains all the tests PerfDash currently supports. Downloader will download and
+	// analyze build logs from all these jobs, and parse the data for all these tests.
+	// Notice that all the tests should have different names for now.
+	TestConfig = Buckets{utils.KubekinsBucket: getProwConfigOrDie()}
+)
+
+func getProwConfigOrDie() Jobs {
+	jobs, err := getProwConfig()
+	if err != nil {
+		panic(err)
+	}
+	return jobs
+}
+
+// config is a minimal subset of the prow config definition at k8s.io/test-infra/prow/config.
+type config struct {
+	Periodics []periodic `json:"periodics"`
+}
+type periodic struct {
+	Name string   `json:"name"`
+	Tags []string `json:"tags"`
+}
+
+func getProwConfig() (Jobs, error) {
+	fmt.Fprintf(os.Stderr, "Fetching prow config from GitHub...\n")
+	resp, err := http.Get("https://raw.githubusercontent.com/kubernetes/test-infra/master/prow/config.yaml")
+	if err != nil {
+		return nil, fmt.Errorf("error fetching prow config from GitHub: %v", err)
+	}
+	defer resp.Body.Close()
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("error reading prow config from GitHub: %v", err)
+	}
+	conf := &config{}
+	if err := yaml.Unmarshal(b, conf); err != nil {
+		return nil, fmt.Errorf("error unmarshaling prow config from GitHub: %v", err)
+	}
+	jobs := Jobs{}
+	for _, periodic := range conf.Periodics {
+		for _, tag := range periodic.Tags {
+			if strings.HasPrefix(tag, "perfDashPrefix:") {
+				split := strings.SplitN(tag, ":", 2)
+				jobs[periodic.Name] = Tests{
+					Prefix:       strings.TrimSpace(split[1]),
+					Descriptions: performanceDescriptions,
+				}
+				break
+			}
+		}
+	}
+	fmt.Printf("Read config with %d jobs\n", len(jobs))
+	return jobs, nil
+}
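+
+// Example (illustrative): a periodic job opts into perfdash by carrying a tag of
+// the following form in prow/config.yaml; the job name here is hypothetical:
+//
+//	periodics:
+//	- name: ci-kubernetes-e2e-gce-scalability
+//	  tags:
+//	  - "perfDashPrefix: gce-scalability"
+//
+// getProwConfig above then registers that job's tests under the given prefix, so
+// its charts appear under names like "gce-scalability-DensityResponsiveness".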
diff --git a/perfdash/deployment.yaml b/perfdash/deployment.yaml
new file mode 100644
index 000000000000..b6693ce57147
--- /dev/null
+++ b/perfdash/deployment.yaml
@@ -0,0 +1,44 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: perfdash
+  namespace: default
+  labels:
+    app: perfdash
+    version: "0.11"
+spec:
+  selector:
+    matchLabels:
+      app: perfdash
+  template:
+    metadata:
+      labels:
+        app: perfdash
+    spec:
+      containers:
+      - name: perfdash
+        image: gcr.io/google_containers/perfdash:0.11
+        command:
+        - /perfdash
+        - --www=true
+        - --dir=/www
+        - --address=0.0.0.0:8080
+        - --builds=100
+        imagePullPolicy: Always
+        ports:
+        - name: status
+          containerPort: 8080
+        livenessProbe:
+          httpGet:
+            path: /
+            port: 8080
+          initialDelaySeconds: 10
+          timeoutSeconds: 1
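+        # Requests below are set equal to limits, which gives the pod the
+        # Guaranteed QoS class and makes it less likely to be evicted under
+        # node resource pressure (assumed intentional for a dashboard that
+        # keeps its whole data set in memory).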
+        resources:
+          requests:
+            cpu: 600m
+            memory: 1200Mi
+          limits:
+            cpu: 600m
+            memory: 1200Mi
+      restartPolicy: Always
diff --git a/perfdash/downloader.go b/perfdash/downloader.go
new file mode 100644
index 000000000000..49973f57fa30
--- /dev/null
+++ b/perfdash/downloader.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"k8s.io/kubernetes/test/e2e/perftype"
+)
+
+// Downloader is the interface that gets data from a predefined source.
+type Downloader interface {
+	getData() (TestToBuildData, error)
+}
+
+// BuildData contains the job name and a map from build number to perf data.
+type BuildData struct {
+	Builds  map[string][]perftype.DataItem `json:"builds"`
+	Job     string                         `json:"job"`
+	Version string                         `json:"version"`
+}
+
+// TestToBuildData is a map from test name to BuildData pointer.
+// TODO(random-liu): Use a more complex data structure if we need to support more tests in the future.
+type TestToBuildData map[string]*BuildData
+
+func (b *TestToBuildData) ServeHTTP(res http.ResponseWriter, req *http.Request) {
+	data, err := json.Marshal(b)
+	if err != nil {
+		res.Header().Set("Content-type", "text/html")
+		res.WriteHeader(http.StatusInternalServerError)
+		res.Write([]byte(fmt.Sprintf("<h3>Internal Error</h3><p>%v", err)))
+		return
+	}
+	res.Header().Set("Content-type", "application/json")
+	res.WriteHeader(http.StatusOK)
+	res.Write(data)
+}
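+
+// For illustration, the JSON served by this handler (mounted at /api in
+// perfdash.go) has roughly the following shape; names and numbers are made up:
+//
+//	{"gce-scalability-DensityResponsiveness": {
+//	    "builds": {"1234": [{"data": {"Perc99": 123.4}, "unit": "ms",
+//	                         "labels": {"Resource": "pods", "Verb": "LIST"}}]},
+//	    "job": "ci-kubernetes-e2e-gce-scalability",
+//	    "version": "v1"}}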
%v", err))) + return + } + res.Header().Set("Content-type", "application/json") + res.WriteHeader(http.StatusOK) + res.Write(data) +} diff --git a/perfdash/google-gcs-downloader.go b/perfdash/google-gcs-downloader.go new file mode 100644 index 000000000000..54c65ededfc7 --- /dev/null +++ b/perfdash/google-gcs-downloader.go @@ -0,0 +1,118 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + + "k8s.io/contrib/test-utils/utils" + "k8s.io/kubernetes/test/e2e/perftype" + "sync" +) + +// GoogleGCSDownloader that gets data about Google results from the GCS repository +type GoogleGCSDownloader struct { + Builds int + GoogleGCSBucketUtils *utils.Utils +} + +// NewGoogleGCSDownloader creates a new GoogleGCSDownloader +func NewGoogleGCSDownloader(builds int) *GoogleGCSDownloader { + return &GoogleGCSDownloader{ + Builds: builds, + GoogleGCSBucketUtils: utils.NewUtils(utils.KubekinsBucket, utils.LogDir), + } +} + +// TODO(random-liu): Only download and update new data each time. +func (g *GoogleGCSDownloader) getData() (TestToBuildData, error) { + newJobs, err := getProwConfig() + if err == nil { + TestConfig[utils.KubekinsBucket] = newJobs + } else { + fmt.Fprintf(os.Stderr, "Failed to refresh config: %v", err) + } + fmt.Print("Getting Data from GCS...\n") + result := make(TestToBuildData) + var resultLock sync.Mutex + var wg sync.WaitGroup + wg.Add(len(TestConfig[utils.KubekinsBucket])) + for job, tests := range TestConfig[utils.KubekinsBucket] { + if tests.Prefix == "" { + return result, fmt.Errorf("Invalid empty Prefix for job %s", job) + } + for testLabel := range tests.Descriptions { + testName := tests.Prefix + "-" + testLabel + resultLock.Lock() + if _, found := result[testName]; found { + return result, fmt.Errorf("Duplicate name %s", testName) + } + result[testName] = &BuildData{Job: job, Version: "", Builds: map[string][]perftype.DataItem{}} + resultLock.Unlock() + } + go g.getJobData(&wg, result, &resultLock, job, tests) + } + wg.Wait() + return result, nil +} + +func (g *GoogleGCSDownloader) getJobData(wg *sync.WaitGroup, result TestToBuildData, resultLock *sync.Mutex, job string, tests Tests) { + defer wg.Done() + lastBuildNo, err := g.GoogleGCSBucketUtils.GetLastestBuildNumberFromJenkinsGoogleBucket(job) + if err != nil { + panic(err) + } + fmt.Printf("Last build no for %v: %v\n", job, lastBuildNo) + + for buildNumber := lastBuildNo; buildNumber > lastBuildNo-g.Builds && buildNumber > 0; buildNumber-- { + fmt.Printf("Fetching %s build %v...\n", job, buildNumber) + for testLabel, testDescription := range tests.Descriptions { + fileStem := fmt.Sprintf("artifacts/%v_%v", testDescription.OutputFilePrefix, testDescription.Name) + artifacts, err := g.GoogleGCSBucketUtils.ListFilesInBuild(job, buildNumber, fileStem) + if err != nil || len(artifacts) == 0 { + fmt.Printf("Error while looking for %s* in %s build %v: %v\n", fileStem, job, buildNumber, err) + continue + } + metricsFilename := 
artifacts[0][strings.LastIndex(artifacts[0], "/")+1:] + if len(artifacts) > 1 { + fmt.Printf("WARNING: found multiple %s files with data, reading only one: %s\n", fileStem, metricsFilename) + } + testDataResponse, err := g.GoogleGCSBucketUtils.GetFileFromJenkinsGoogleBucket(job, buildNumber, fmt.Sprintf("artifacts/%v", metricsFilename)) + if err != nil { + panic(err) + } + + func() { + testDataBody := testDataResponse.Body + defer testDataBody.Close() + data, err := ioutil.ReadAll(testDataBody) + if err != nil { + fmt.Fprintf(os.Stderr, "Error when reading response Body: %v\n", err) + return + } + testName := tests.Prefix + "-" + testLabel + resultLock.Lock() + buildData := result[testName] + resultLock.Unlock() + testDescription.Parser(data, buildNumber, buildData) + }() + } + } +} diff --git a/perfdash/parser.go b/perfdash/parser.go new file mode 100644 index 000000000000..2790b64452a7 --- /dev/null +++ b/perfdash/parser.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "encoding/json" + "fmt" + "os" + + "k8s.io/kubernetes/test/e2e/perftype" + "math" +) + +func stripCount(data *perftype.DataItem) { + delete(data.Labels, "Count") +} + +func parseResponsivenessData(data []byte, buildNumber int, testResult *BuildData) { + build := fmt.Sprintf("%d", buildNumber) + obj := perftype.PerfData{} + if err := json.Unmarshal(data, &obj); err != nil { + fmt.Fprintf(os.Stderr, "error parsing JSON in build %d: %v %s\n", buildNumber, err, string(data)) + return + } + if testResult.Version == "" { + testResult.Version = obj.Version + } + if testResult.Version == obj.Version { + for i := range obj.DataItems { + stripCount(&obj.DataItems[i]) + testResult.Builds[build] = append(testResult.Builds[build], obj.DataItems[i]) + } + } +} + +type resourceUsagePercentiles map[string][]resourceUsages + +type resourceUsages struct { + Name string `json:"Name"` + Cpu float64 `json:"Cpu"` + Memory int `json:"Mem"` +} + +type resourceUsage struct { + Cpu float64 + Memory float64 +} +type usageAtPercentiles map[string]resourceUsage +type podNameToUsage map[string]usageAtPercentiles + +func parseResourceUsageData(data []byte, buildNumber int, testResult *BuildData) { + testResult.Version = "v1" + build := fmt.Sprintf("%d", buildNumber) + var obj resourceUsagePercentiles + if err := json.Unmarshal(data, &obj); err != nil { + fmt.Fprintf(os.Stderr, "error parsing JSON in build %d: %v %s\n", buildNumber, err, string(data)) + return + } + usage := make(podNameToUsage) + for percentile, items := range obj { + for _, item := range items { + name := RemoveDisambiguationInfixes(item.Name) + if _, ok := usage[name]; !ok { + usage[name] = make(usageAtPercentiles) + } + cpu, memory := float64(item.Cpu), float64(item.Memory) + if otherUsage, ok := usage[name][percentile]; ok { + // Note that we take max of each resource separately, potentially manufacturing a + // "franken-sample" which was never seen in the wild. 
+				// We do this hoping that such a result will be more stable across runs.
+				cpu = math.Max(cpu, otherUsage.Cpu)
+				memory = math.Max(memory, otherUsage.Memory)
+			}
+			usage[name][percentile] = resourceUsage{cpu, memory}
+		}
+	}
+	for podName, usageAtPercentiles := range usage {
+		cpu := perftype.DataItem{Unit: "cores", Labels: map[string]string{"PodName": podName, "Resource": "CPU"}, Data: make(map[string]float64)}
+		memory := perftype.DataItem{Unit: "MiB", Labels: map[string]string{"PodName": podName, "Resource": "memory"}, Data: make(map[string]float64)}
+		for percentile, usage := range usageAtPercentiles {
+			cpu.Data[percentile] = usage.Cpu
+			memory.Data[percentile] = usage.Memory / (1024 * 1024)
+		}
+		testResult.Builds[build] = append(testResult.Builds[build], cpu)
+		testResult.Builds[build] = append(testResult.Builds[build], memory)
+	}
+}
diff --git a/perfdash/perfdash-service.yaml b/perfdash/perfdash-service.yaml
new file mode 100644
index 000000000000..ecebe1de6e27
--- /dev/null
+++ b/perfdash/perfdash-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: perfdash-status
+  labels:
+    app: perfdash
+spec:
+  selector:
+    app: perfdash
+  ports:
+  - name: status
+    port: 80
+    targetPort: status
+  type: LoadBalancer
diff --git a/perfdash/perfdash.go b/perfdash/perfdash.go
new file mode 100644
index 000000000000..98705aa2f6c3
--- /dev/null
+++ b/perfdash/perfdash.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"net/http"
+	"os"
+	"time"
+)
+
+const (
+	pollDuration = 10 * time.Minute
+	errorDelay   = 10 * time.Second
+	maxBuilds    = 100
+)
+
+var (
+	addr   = flag.String("address", ":8080", "The address to serve web data on")
+	www    = flag.Bool("www", false, "If true, start a web server to serve performance data")
+	wwwDir = flag.String("dir", "www", "If non-empty, add a file server for this directory at the root of the web server")
+	builds = flag.Int("builds", maxBuilds, "Total number of builds to fetch")
+)
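+
+// Example invocation, mirroring the flags deployment.yaml passes to the
+// container:
+//
+//	perfdash --www=true --dir=/www --address=0.0.0.0:8080 --builds=100
+//
+// Without --www, perfdash does a one-shot fetch and prints the result as
+// indented JSON instead of serving it.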
+
+func main() {
+	fmt.Print("Starting perfdash...\n")
+	flag.Parse()
+
+	if *builds > maxBuilds || *builds < 0 {
+		fmt.Printf("Invalid number of builds: %d, setting to %d\n", *builds, maxBuilds)
+		*builds = maxBuilds
+	}
+
+	// TODO(random-liu): Add a top layer downloader to download build logs from different buckets when we support
+	// more buckets in the future.
+	downloader := NewGoogleGCSDownloader(*builds)
+	result := make(TestToBuildData)
+	var err error
+
+	if !*www {
+		result, err = downloader.getData()
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error fetching data: %v\n", err)
+			os.Exit(1)
+		}
+		prettyResult, err := json.MarshalIndent(result, "", " ")
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error formatting data: %v\n", err)
+			os.Exit(1)
+		}
+		fmt.Printf("Result: %v\n", string(prettyResult))
+		return
+	}
+
+	go func() {
+		for {
+			fmt.Printf("Fetching new data...\n")
+			result, err = downloader.getData()
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "Error fetching data: %v\n", err)
+				time.Sleep(errorDelay)
+				continue
+			}
+			fmt.Printf("Data fetched, sleeping %v...\n", pollDuration)
+			time.Sleep(pollDuration)
+		}
+	}()
+
+	http.Handle("/api", &result)
+	http.Handle("/", http.FileServer(http.Dir(*wwwDir)))
+	http.ListenAndServe(*addr, nil)
+}
diff --git a/perfdash/pod_names.go b/perfdash/pod_names.go
new file mode 100644
index 000000000000..d16d048b794c
--- /dev/null
+++ b/perfdash/pod_names.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"regexp"
+	"strings"
+)
+
+var /* const */ versionRegexp = regexp.MustCompile("^v[0-9].*")
+
+// RemoveDisambiguationInfixes removes (from pod/container names) version strings and hashes inserted by replication controllers and the like.
+func RemoveDisambiguationInfixes(podAndContainer string) string {
+	split := strings.SplitN(podAndContainer, "/", 2)
+	if len(split) < 2 {
+		return podAndContainer
+	}
+	pod, container := split[0], split[1]
+	pieces := strings.Split(pod, "-")
+	var last string
+	for i, piece := range pieces {
+		if looksLikeHash(piece) || versionRegexp.MatchString(piece) {
+			break
+		}
+		last = strings.Join(pieces[:i+1], "-")
+	}
+	return strings.Join([]string{last, container}, "/")
+}
+
+// looksLikeHash returns true if piece seems to be one of those pseudo-random disambiguation strings.
+func looksLikeHash(piece string) bool {
+	return len(piece) >= 4 && !strings.ContainsAny(piece, "eyuioa")
+}
diff --git a/perfdash/pod_names_test.go b/perfdash/pod_names_test.go
new file mode 100644
index 000000000000..ba57e7359520
--- /dev/null
+++ b/perfdash/pod_names_test.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package main + +import "testing" + +func TestRemoveDisambiguationInfixes(t *testing.T) { + testCases := []struct{ Input, Output string }{ + {"v1", "v1"}, + {"v1-blah", "v1-blah"}, + {"etcd-empty-dir-cleanup-e2e-scalability-master/etcd-empty-dir-cleanup", "etcd-empty-dir-cleanup-e2e-scalability-master/etcd-empty-dir-cleanup"}, + {"etcd-server-e2e-scalability-master/etcd-container", "etcd-server-e2e-scalability-master/etcd-container"}, + {"etcd-server-events-e2e-scalability-master/etcd-container", "etcd-server-events-e2e-scalability-master/etcd-container"}, + {"event-exporter-v0.1.7-9d4dbb69c-sff9w/event-exporter", "event-exporter/event-exporter"}, + {"fluentd-gcp-v2.0.9-2dxjh/fluentd-gcp", "fluentd-gcp/fluentd-gcp"}, + {"heapster-v1.5.0-beta.0-64d4f4bdd8-ljnfd/eventer", "heapster/eventer"}, + {"kube-addon-manager-e2e-scalability-master/kube-addon-manager", "kube-addon-manager-e2e-scalability-master/kube-addon-manager"}, + {"kube-apiserver-e2e-scalability-master/kube-apiserver", "kube-apiserver-e2e-scalability-master/kube-apiserver"}, + {"kube-controller-manager-e2e-scalability-master/kube-controller-manager", "kube-controller-manager-e2e-scalability-master/kube-controller-manager"}, + {"kube-dns-74dbf45884-7gkmp/dnsmasq", "kube-dns/dnsmasq"}, + {"kubernetes-dashboard-765c6f47bd-sbrxj/kubernetes-dashboard", "kubernetes-dashboard/kubernetes-dashboard"}, + {"l7-default-backend-6d477bf555-jfmgt/default-http-backend", "l7-default-backend/default-http-backend"}, + {"l7-lb-controller-v0.9.7-e2e-scalability-master/l7-lb-controller", "l7-lb-controller/l7-lb-controller"}, + {"kube-proxy-e2e-scalability-minion-group-2mh1/kube-proxy", "kube-proxy-e2e-scalability-minion-group/kube-proxy"}, + } + for _, testCase := range testCases { + v := RemoveDisambiguationInfixes(testCase.Input) + if v != testCase.Output { + t.Error("For", testCase.Input, "expected", testCase.Output, "but got", v) + } + } +} diff --git a/perfdash/www/Chart.js b/perfdash/www/Chart.js new file mode 100644 index 000000000000..cd79b6df817c --- /dev/null +++ b/perfdash/www/Chart.js @@ -0,0 +1,3477 @@ +/*! + * Chart.js + * http://chartjs.org/ + * Version: 1.0.2 + * + * Copyright 2015 Nick Downie + * Released under the MIT license + * https://github.com/nnnick/Chart.js/blob/master/LICENSE.md + */ + + +(function(){ + + "use strict"; + + //Declare root variable - window in the browser, global on the server + var root = this, + previous = root.Chart; + + //Occupy the global variable of Chart, and create a simple base class + var Chart = function(context){ + var chart = this; + this.canvas = context.canvas; + + this.ctx = context; + + //Variables global to the chart + var computeDimension = function(element,dimension) + { + if (element['offset'+dimension]) + { + return element['offset'+dimension]; + } + else + { + return document.defaultView.getComputedStyle(element).getPropertyValue(dimension); + } + } + + var width = this.width = computeDimension(context.canvas,'Width'); + var height = this.height = computeDimension(context.canvas,'Height'); + + // Firefox requires this to work correctly + context.canvas.width = width; + context.canvas.height = height; + + var width = this.width = context.canvas.width; + var height = this.height = context.canvas.height; + this.aspectRatio = this.width / this.height; + //High pixel density displays - multiply the size of the canvas height/width by the device pixel ratio, then scale. 
+ helpers.retinaScale(this); + + return this; + }; + //Globally expose the defaults to allow for user updating/changing + Chart.defaults = { + global: { + // Boolean - Whether to animate the chart + animation: true, + + // Number - Number of animation steps + animationSteps: 60, + + // String - Animation easing effect + animationEasing: "easeOutQuart", + + // Boolean - If we should show the scale at all + showScale: true, + + // Boolean - If we want to override with a hard coded scale + scaleOverride: false, + + // ** Required if scaleOverride is true ** + // Number - The number of steps in a hard coded scale + scaleSteps: null, + // Number - The value jump in the hard coded scale + scaleStepWidth: null, + // Number - The scale starting value + scaleStartValue: null, + + // String - Colour of the scale line + scaleLineColor: "rgba(0,0,0,.1)", + + // Number - Pixel width of the scale line + scaleLineWidth: 1, + + // Boolean - Whether to show labels on the scale + scaleShowLabels: true, + + // Interpolated JS string - can access value + scaleLabel: "<%=value%>", + + // Boolean - Whether the scale should stick to integers, and not show any floats even if drawing space is there + scaleIntegersOnly: true, + + // Boolean - Whether the scale should start at zero, or an order of magnitude down from the lowest value + scaleBeginAtZero: false, + + // String - Scale label font declaration for the scale label + scaleFontFamily: "'Helvetica Neue', 'Helvetica', 'Arial', sans-serif", + + // Number - Scale label font size in pixels + scaleFontSize: 12, + + // String - Scale label font weight style + scaleFontStyle: "normal", + + // String - Scale label font colour + scaleFontColor: "#666", + + // Boolean - whether or not the chart should be responsive and resize when the browser does. 
+ responsive: false, + + // Boolean - whether to maintain the starting aspect ratio or not when responsive, if set to false, will take up entire container + maintainAspectRatio: true, + + // Boolean - Determines whether to draw tooltips on the canvas or not - attaches events to touchmove & mousemove + showTooltips: true, + + // Boolean - Determines whether to draw built-in tooltip or call custom tooltip function + customTooltips: false, + + // Array - Array of string names to attach tooltip events + tooltipEvents: ["mousemove", "touchstart", "touchmove", "mouseout"], + + // String - Tooltip background colour + tooltipFillColor: "rgba(0,0,0,0.8)", + + // String - Tooltip label font declaration for the scale label + tooltipFontFamily: "'Helvetica Neue', 'Helvetica', 'Arial', sans-serif", + + // Number - Tooltip label font size in pixels + tooltipFontSize: 14, + + // String - Tooltip font weight style + tooltipFontStyle: "normal", + + // String - Tooltip label font colour + tooltipFontColor: "#fff", + + // String - Tooltip title font declaration for the scale label + tooltipTitleFontFamily: "'Helvetica Neue', 'Helvetica', 'Arial', sans-serif", + + // Number - Tooltip title font size in pixels + tooltipTitleFontSize: 14, + + // String - Tooltip title font weight style + tooltipTitleFontStyle: "bold", + + // String - Tooltip title font colour + tooltipTitleFontColor: "#fff", + + // Number - pixel width of padding around tooltip text + tooltipYPadding: 6, + + // Number - pixel width of padding around tooltip text + tooltipXPadding: 6, + + // Number - Size of the caret on the tooltip + tooltipCaretSize: 8, + + // Number - Pixel radius of the tooltip border + tooltipCornerRadius: 6, + + // Number - Pixel offset from point x to tooltip edge + tooltipXOffset: 10, + + // String - Template string for single tooltips + tooltipTemplate: "<%if (label){%><%=label%>: <%}%><%= value %>", + + // String - Template string for single tooltips + multiTooltipTemplate: "<%= value %>", + + // String - Colour behind the legend colour block + multiTooltipKeyBackground: '#fff', + + // Function - Will fire on animation progression. + onAnimationProgress: function(){}, + + // Function - Will fire on animation completion. + onAnimationComplete: function(){} + + } + }; + + //Create a dictionary of chart types, to allow for extension of existing types + Chart.types = {}; + + //Global Chart helpers object for utility methods and classes + var helpers = Chart.helpers = {}; + + //-- Basic js utility methods + var each = helpers.each = function(loopable,callback,self){ + var additionalArgs = Array.prototype.slice.call(arguments, 3); + // Check to see if null or undefined firstly. + if (loopable){ + if (loopable.length === +loopable.length){ + var i; + for (i=0; i= 0; i--) { + var currentItem = arrayToSearch[i]; + if (filterCallback(currentItem)){ + return currentItem; + } + } + }, + inherits = helpers.inherits = function(extensions){ + //Basic javascript inheritance based on the model created in Backbone.js + var parent = this; + var ChartElement = (extensions && extensions.hasOwnProperty("constructor")) ? 
extensions.constructor : function(){ return parent.apply(this, arguments); }; + + var Surrogate = function(){ this.constructor = ChartElement;}; + Surrogate.prototype = parent.prototype; + ChartElement.prototype = new Surrogate(); + + ChartElement.extend = inherits; + + if (extensions) extend(ChartElement.prototype, extensions); + + ChartElement.__super__ = parent.prototype; + + return ChartElement; + }, + noop = helpers.noop = function(){}, + uid = helpers.uid = (function(){ + var id=0; + return function(){ + return "chart-" + id++; + }; + })(), + warn = helpers.warn = function(str){ + //Method for warning of errors + if (window.console && typeof window.console.warn == "function") console.warn(str); + }, + amd = helpers.amd = (typeof define == 'function' && define.amd), + //-- Math methods + isNumber = helpers.isNumber = function(n){ + return !isNaN(parseFloat(n)) && isFinite(n); + }, + max = helpers.max = function(array){ + return Math.max.apply( Math, array ); + }, + min = helpers.min = function(array){ + return Math.min.apply( Math, array ); + }, + cap = helpers.cap = function(valueToCap,maxValue,minValue){ + if(isNumber(maxValue)) { + if( valueToCap > maxValue ) { + return maxValue; + } + } + else if(isNumber(minValue)){ + if ( valueToCap < minValue ){ + return minValue; + } + } + return valueToCap; + }, + getDecimalPlaces = helpers.getDecimalPlaces = function(num){ + if (num%1!==0 && isNumber(num)){ + return num.toString().split(".")[1].length; + } + else { + return 0; + } + }, + toRadians = helpers.radians = function(degrees){ + return degrees * (Math.PI/180); + }, + // Gets the angle from vertical upright to the point about a centre. + getAngleFromPoint = helpers.getAngleFromPoint = function(centrePoint, anglePoint){ + var distanceFromXCenter = anglePoint.x - centrePoint.x, + distanceFromYCenter = anglePoint.y - centrePoint.y, + radialDistanceFromCenter = Math.sqrt( distanceFromXCenter * distanceFromXCenter + distanceFromYCenter * distanceFromYCenter); + + + var angle = Math.PI * 2 + Math.atan2(distanceFromYCenter, distanceFromXCenter); + + //If the segment is in the top left quadrant, we need to add another rotation to the angle + if (distanceFromXCenter < 0 && distanceFromYCenter < 0){ + angle += Math.PI*2; + } + + return { + angle: angle, + distance: radialDistanceFromCenter + }; + }, + aliasPixel = helpers.aliasPixel = function(pixelWidth){ + return (pixelWidth % 2 === 0) ? 
0 : 0.5; + }, + splineCurve = helpers.splineCurve = function(FirstPoint,MiddlePoint,AfterPoint,t){ + //Props to Rob Spencer at scaled innovation for his post on splining between points + //http://scaledinnovation.com/analytics/splines/aboutSplines.html + var d01=Math.sqrt(Math.pow(MiddlePoint.x-FirstPoint.x,2)+Math.pow(MiddlePoint.y-FirstPoint.y,2)), + d12=Math.sqrt(Math.pow(AfterPoint.x-MiddlePoint.x,2)+Math.pow(AfterPoint.y-MiddlePoint.y,2)), + fa=t*d01/(d01+d12),// scaling factor for triangle Ta + fb=t*d12/(d01+d12); + return { + inner : { + x : MiddlePoint.x-fa*(AfterPoint.x-FirstPoint.x), + y : MiddlePoint.y-fa*(AfterPoint.y-FirstPoint.y) + }, + outer : { + x: MiddlePoint.x+fb*(AfterPoint.x-FirstPoint.x), + y : MiddlePoint.y+fb*(AfterPoint.y-FirstPoint.y) + } + }; + }, + calculateOrderOfMagnitude = helpers.calculateOrderOfMagnitude = function(val){ + return Math.floor(Math.log(val) / Math.LN10); + }, + calculateScaleRange = helpers.calculateScaleRange = function(valuesArray, drawingSize, textSize, startFromZero, integersOnly){ + + //Set a minimum step of two - a point at the top of the graph, and a point at the base + var minSteps = 2, + maxSteps = Math.floor(drawingSize/(textSize * 1.5)), + skipFitting = (minSteps >= maxSteps); + + var maxValue = max(valuesArray), + minValue = min(valuesArray); + + // We need some degree of seperation here to calculate the scales if all the values are the same + // Adding/minusing 0.5 will give us a range of 1. + if (maxValue === minValue){ + maxValue += 0.5; + // So we don't end up with a graph with a negative start value if we've said always start from zero + if (minValue >= 0.5 && !startFromZero){ + minValue -= 0.5; + } + else{ + // Make up a whole number above the values + maxValue += 0.5; + } + } + + var valueRange = Math.abs(maxValue - minValue), + rangeOrderOfMagnitude = calculateOrderOfMagnitude(valueRange), + graphMax = Math.ceil(maxValue / (1 * Math.pow(10, rangeOrderOfMagnitude))) * Math.pow(10, rangeOrderOfMagnitude), + graphMin = (startFromZero) ? 0 : Math.floor(minValue / (1 * Math.pow(10, rangeOrderOfMagnitude))) * Math.pow(10, rangeOrderOfMagnitude), + graphRange = graphMax - graphMin, + stepValue = Math.pow(10, rangeOrderOfMagnitude), + numberOfSteps = Math.round(graphRange / stepValue); + + //If we have more space on the graph we'll use it to give more definition to the data + while((numberOfSteps > maxSteps || (numberOfSteps * 2) < maxSteps) && !skipFitting) { + if(numberOfSteps > maxSteps){ + stepValue *=2; + numberOfSteps = Math.round(graphRange/stepValue); + // Don't ever deal with a decimal number of steps - cancel fitting and just use the minimum number of steps. + if (numberOfSteps % 1 !== 0){ + skipFitting = true; + } + } + //We can fit in double the amount of scale points on the scale + else{ + //If user has declared ints only, and the step value isn't a decimal + if (integersOnly && rangeOrderOfMagnitude >= 0){ + //If the user has said integers only, we need to check that making the scale more granular wouldn't make it a float + if(stepValue/2 % 1 === 0){ + stepValue /=2; + numberOfSteps = Math.round(graphRange/stepValue); + } + //If it would make it a float break out of the loop + else{ + break; + } + } + //If the scale doesn't have to be an int, make the scale more granular anyway. 
+ else{ + stepValue /=2; + numberOfSteps = Math.round(graphRange/stepValue); + } + + } + } + + if (skipFitting){ + numberOfSteps = minSteps; + stepValue = graphRange / numberOfSteps; + } + + return { + steps : numberOfSteps, + stepValue : stepValue, + min : graphMin, + max : graphMin + (numberOfSteps * stepValue) + }; + + }, + /* jshint ignore:start */ + // Blows up jshint errors based on the new Function constructor + //Templating methods + //Javascript micro templating by John Resig - source at http://ejohn.org/blog/javascript-micro-templating/ + template = helpers.template = function(templateString, valuesObject){ + + // If templateString is function rather than string-template - call the function for valuesObject + + if(templateString instanceof Function){ + return templateString(valuesObject); + } + + var cache = {}; + function tmpl(str, data){ + // Figure out if we're getting a template, or if we need to + // load the template - and be sure to cache the result. + var fn = !/\W/.test(str) ? + cache[str] = cache[str] : + + // Generate a reusable function that will serve as a template + // generator (and which will be cached). + new Function("obj", + "var p=[],print=function(){p.push.apply(p,arguments);};" + + + // Introduce the data as local variables using with(){} + "with(obj){p.push('" + + + // Convert the template into pure JavaScript + str + .replace(/[\r\t\n]/g, " ") + .split("<%").join("\t") + .replace(/((^|%>)[^\t]*)'/g, "$1\r") + .replace(/\t=(.*?)%>/g, "',$1,'") + .split("\t").join("');") + .split("%>").join("p.push('") + .split("\r").join("\\'") + + "');}return p.join('');" + ); + + // Provide some basic currying to the user + return data ? fn( data ) : fn; + } + return tmpl(templateString,valuesObject); + }, + /* jshint ignore:end */ + generateLabels = helpers.generateLabels = function(templateString,numberOfSteps,graphMin,stepValue){ + var labelsArray = new Array(numberOfSteps); + if (labelTemplateString){ + each(labelsArray,function(val,index){ + labelsArray[index] = template(templateString,{value: (graphMin + (stepValue*(index+1)))}); + }); + } + return labelsArray; + }, + //--Animation methods + //Easing functions adapted from Robert Penner's easing equations + //http://www.robertpenner.com/easing/ + easingEffects = helpers.easingEffects = { + linear: function (t) { + return t; + }, + easeInQuad: function (t) { + return t * t; + }, + easeOutQuad: function (t) { + return -1 * t * (t - 2); + }, + easeInOutQuad: function (t) { + if ((t /= 1 / 2) < 1) return 1 / 2 * t * t; + return -1 / 2 * ((--t) * (t - 2) - 1); + }, + easeInCubic: function (t) { + return t * t * t; + }, + easeOutCubic: function (t) { + return 1 * ((t = t / 1 - 1) * t * t + 1); + }, + easeInOutCubic: function (t) { + if ((t /= 1 / 2) < 1) return 1 / 2 * t * t * t; + return 1 / 2 * ((t -= 2) * t * t + 2); + }, + easeInQuart: function (t) { + return t * t * t * t; + }, + easeOutQuart: function (t) { + return -1 * ((t = t / 1 - 1) * t * t * t - 1); + }, + easeInOutQuart: function (t) { + if ((t /= 1 / 2) < 1) return 1 / 2 * t * t * t * t; + return -1 / 2 * ((t -= 2) * t * t * t - 2); + }, + easeInQuint: function (t) { + return 1 * (t /= 1) * t * t * t * t; + }, + easeOutQuint: function (t) { + return 1 * ((t = t / 1 - 1) * t * t * t * t + 1); + }, + easeInOutQuint: function (t) { + if ((t /= 1 / 2) < 1) return 1 / 2 * t * t * t * t * t; + return 1 / 2 * ((t -= 2) * t * t * t * t + 2); + }, + easeInSine: function (t) { + return -1 * Math.cos(t / 1 * (Math.PI / 2)) + 1; + }, + easeOutSine: function (t) { + 
return 1 * Math.sin(t / 1 * (Math.PI / 2)); + }, + easeInOutSine: function (t) { + return -1 / 2 * (Math.cos(Math.PI * t / 1) - 1); + }, + easeInExpo: function (t) { + return (t === 0) ? 1 : 1 * Math.pow(2, 10 * (t / 1 - 1)); + }, + easeOutExpo: function (t) { + return (t === 1) ? 1 : 1 * (-Math.pow(2, -10 * t / 1) + 1); + }, + easeInOutExpo: function (t) { + if (t === 0) return 0; + if (t === 1) return 1; + if ((t /= 1 / 2) < 1) return 1 / 2 * Math.pow(2, 10 * (t - 1)); + return 1 / 2 * (-Math.pow(2, -10 * --t) + 2); + }, + easeInCirc: function (t) { + if (t >= 1) return t; + return -1 * (Math.sqrt(1 - (t /= 1) * t) - 1); + }, + easeOutCirc: function (t) { + return 1 * Math.sqrt(1 - (t = t / 1 - 1) * t); + }, + easeInOutCirc: function (t) { + if ((t /= 1 / 2) < 1) return -1 / 2 * (Math.sqrt(1 - t * t) - 1); + return 1 / 2 * (Math.sqrt(1 - (t -= 2) * t) + 1); + }, + easeInElastic: function (t) { + var s = 1.70158; + var p = 0; + var a = 1; + if (t === 0) return 0; + if ((t /= 1) == 1) return 1; + if (!p) p = 1 * 0.3; + if (a < Math.abs(1)) { + a = 1; + s = p / 4; + } else s = p / (2 * Math.PI) * Math.asin(1 / a); + return -(a * Math.pow(2, 10 * (t -= 1)) * Math.sin((t * 1 - s) * (2 * Math.PI) / p)); + }, + easeOutElastic: function (t) { + var s = 1.70158; + var p = 0; + var a = 1; + if (t === 0) return 0; + if ((t /= 1) == 1) return 1; + if (!p) p = 1 * 0.3; + if (a < Math.abs(1)) { + a = 1; + s = p / 4; + } else s = p / (2 * Math.PI) * Math.asin(1 / a); + return a * Math.pow(2, -10 * t) * Math.sin((t * 1 - s) * (2 * Math.PI) / p) + 1; + }, + easeInOutElastic: function (t) { + var s = 1.70158; + var p = 0; + var a = 1; + if (t === 0) return 0; + if ((t /= 1 / 2) == 2) return 1; + if (!p) p = 1 * (0.3 * 1.5); + if (a < Math.abs(1)) { + a = 1; + s = p / 4; + } else s = p / (2 * Math.PI) * Math.asin(1 / a); + if (t < 1) return -0.5 * (a * Math.pow(2, 10 * (t -= 1)) * Math.sin((t * 1 - s) * (2 * Math.PI) / p)); + return a * Math.pow(2, -10 * (t -= 1)) * Math.sin((t * 1 - s) * (2 * Math.PI) / p) * 0.5 + 1; + }, + easeInBack: function (t) { + var s = 1.70158; + return 1 * (t /= 1) * t * ((s + 1) * t - s); + }, + easeOutBack: function (t) { + var s = 1.70158; + return 1 * ((t = t / 1 - 1) * t * ((s + 1) * t + s) + 1); + }, + easeInOutBack: function (t) { + var s = 1.70158; + if ((t /= 1 / 2) < 1) return 1 / 2 * (t * t * (((s *= (1.525)) + 1) * t - s)); + return 1 / 2 * ((t -= 2) * t * (((s *= (1.525)) + 1) * t + s) + 2); + }, + easeInBounce: function (t) { + return 1 - easingEffects.easeOutBounce(1 - t); + }, + easeOutBounce: function (t) { + if ((t /= 1) < (1 / 2.75)) { + return 1 * (7.5625 * t * t); + } else if (t < (2 / 2.75)) { + return 1 * (7.5625 * (t -= (1.5 / 2.75)) * t + 0.75); + } else if (t < (2.5 / 2.75)) { + return 1 * (7.5625 * (t -= (2.25 / 2.75)) * t + 0.9375); + } else { + return 1 * (7.5625 * (t -= (2.625 / 2.75)) * t + 0.984375); + } + }, + easeInOutBounce: function (t) { + if (t < 1 / 2) return easingEffects.easeInBounce(t * 2) * 0.5; + return easingEffects.easeOutBounce(t * 2 - 1) * 0.5 + 1 * 0.5; + } + }, + //Request animation polyfill - http://www.paulirish.com/2011/requestanimationframe-for-smart-animating/ + requestAnimFrame = helpers.requestAnimFrame = (function(){ + return window.requestAnimationFrame || + window.webkitRequestAnimationFrame || + window.mozRequestAnimationFrame || + window.oRequestAnimationFrame || + window.msRequestAnimationFrame || + function(callback) { + return window.setTimeout(callback, 1000 / 60); + }; + })(), + cancelAnimFrame = 
helpers.cancelAnimFrame = (function(){ + return window.cancelAnimationFrame || + window.webkitCancelAnimationFrame || + window.mozCancelAnimationFrame || + window.oCancelAnimationFrame || + window.msCancelAnimationFrame || + function(callback) { + return window.clearTimeout(callback, 1000 / 60); + }; + })(), + animationLoop = helpers.animationLoop = function(callback,totalSteps,easingString,onProgress,onComplete,chartInstance){ + + var currentStep = 0, + easingFunction = easingEffects[easingString] || easingEffects.linear; + + var animationFrame = function(){ + currentStep++; + var stepDecimal = currentStep/totalSteps; + var easeDecimal = easingFunction(stepDecimal); + + callback.call(chartInstance,easeDecimal,stepDecimal, currentStep); + onProgress.call(chartInstance,easeDecimal,stepDecimal); + if (currentStep < totalSteps){ + chartInstance.animationFrame = requestAnimFrame(animationFrame); + } else{ + onComplete.apply(chartInstance); + } + }; + requestAnimFrame(animationFrame); + }, + //-- DOM methods + getRelativePosition = helpers.getRelativePosition = function(evt){ + var mouseX, mouseY; + var e = evt.originalEvent || evt, + canvas = evt.currentTarget || evt.srcElement, + boundingRect = canvas.getBoundingClientRect(); + + if (e.touches){ + mouseX = e.touches[0].clientX - boundingRect.left; + mouseY = e.touches[0].clientY - boundingRect.top; + + } + else{ + mouseX = e.clientX - boundingRect.left; + mouseY = e.clientY - boundingRect.top; + } + + return { + x : mouseX, + y : mouseY + }; + + }, + addEvent = helpers.addEvent = function(node,eventType,method){ + if (node.addEventListener){ + node.addEventListener(eventType,method); + } else if (node.attachEvent){ + node.attachEvent("on"+eventType, method); + } else { + node["on"+eventType] = method; + } + }, + removeEvent = helpers.removeEvent = function(node, eventType, handler){ + if (node.removeEventListener){ + node.removeEventListener(eventType, handler, false); + } else if (node.detachEvent){ + node.detachEvent("on"+eventType,handler); + } else{ + node["on" + eventType] = noop; + } + }, + bindEvents = helpers.bindEvents = function(chartInstance, arrayOfEvents, handler){ + // Create the events object if it's not already present + if (!chartInstance.events) chartInstance.events = {}; + + each(arrayOfEvents,function(eventName){ + chartInstance.events[eventName] = function(){ + handler.apply(chartInstance, arguments); + }; + addEvent(chartInstance.chart.canvas,eventName,chartInstance.events[eventName]); + }); + }, + unbindEvents = helpers.unbindEvents = function (chartInstance, arrayOfEvents) { + each(arrayOfEvents, function(handler,eventName){ + removeEvent(chartInstance.chart.canvas, eventName, handler); + }); + }, + getMaximumWidth = helpers.getMaximumWidth = function(domNode){ + var container = domNode.parentNode; + // TODO = check cross browser stuff with this. + return container.clientWidth; + }, + getMaximumHeight = helpers.getMaximumHeight = function(domNode){ + var container = domNode.parentNode; + // TODO = check cross browser stuff with this. 
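
The `animationLoop` helper above is what drives every animated render in this bundle: it advances a step counter once per frame, maps it through the chosen easing function, and hands the eased value to a draw callback. A minimal sketch of driving it directly, assuming this file is loaded in a browser; the `fakeChart` context object is made up for illustration:

```js
// Animate a value from 0 to 100 over 60 frames using helpers.animationLoop.
// `fakeChart` stands in for a chart instance; it only needs to tolerate
// having `.animationFrame` stored on it by the loop.
var fakeChart = { animationFrame: null };

Chart.helpers.animationLoop(
    function (easeDecimal, stepDecimal, currentStep) {
        // easeDecimal is eased progress in [0, 1]; stepDecimal is linear progress.
        console.log('frame ' + currentStep + ': ' + Math.round(easeDecimal * 100));
    },
    60,                                     // totalSteps
    'easeOutQuad',                          // any key of helpers.easingEffects; unknown keys fall back to linear
    function () {},                         // onProgress, called every frame after the callback
    function () { console.log('done'); },   // onComplete
    fakeChart                               // used as `this` and to hold .animationFrame
);
```
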
+ return container.clientHeight; + }, + getMaximumSize = helpers.getMaximumSize = helpers.getMaximumWidth, // legacy support + retinaScale = helpers.retinaScale = function(chart){ + var ctx = chart.ctx, + width = chart.canvas.width, + height = chart.canvas.height; + + if (window.devicePixelRatio) { + ctx.canvas.style.width = width + "px"; + ctx.canvas.style.height = height + "px"; + ctx.canvas.height = height * window.devicePixelRatio; + ctx.canvas.width = width * window.devicePixelRatio; + ctx.scale(window.devicePixelRatio, window.devicePixelRatio); + } + }, + //-- Canvas methods + clear = helpers.clear = function(chart){ + chart.ctx.clearRect(0,0,chart.width,chart.height); + }, + fontString = helpers.fontString = function(pixelSize,fontStyle,fontFamily){ + return fontStyle + " " + pixelSize+"px " + fontFamily; + }, + longestText = helpers.longestText = function(ctx,font,arrayOfStrings){ + ctx.font = font; + var longest = 0; + each(arrayOfStrings,function(string){ + var textWidth = ctx.measureText(string).width; + longest = (textWidth > longest) ? textWidth : longest; + }); + return longest; + }, + drawRoundedRectangle = helpers.drawRoundedRectangle = function(ctx,x,y,width,height,radius){ + ctx.beginPath(); + ctx.moveTo(x + radius, y); + ctx.lineTo(x + width - radius, y); + ctx.quadraticCurveTo(x + width, y, x + width, y + radius); + ctx.lineTo(x + width, y + height - radius); + ctx.quadraticCurveTo(x + width, y + height, x + width - radius, y + height); + ctx.lineTo(x + radius, y + height); + ctx.quadraticCurveTo(x, y + height, x, y + height - radius); + ctx.lineTo(x, y + radius); + ctx.quadraticCurveTo(x, y, x + radius, y); + ctx.closePath(); + }; + + + //Store a reference to each instance - allowing us to globally resize chart instances on window resize. + //Destroy method on the chart will remove the instance of the chart from this reference. + Chart.instances = {}; + + Chart.Type = function(data,options,chart){ + this.options = options; + this.chart = chart; + this.id = uid(); + //Add the chart instance to the global namespace + Chart.instances[this.id] = this; + + // Initialize is always called when a chart type is created + // By default it is a no op, but it should be extended + if (options.responsive){ + this.resize(); + } + this.initialize.call(this,data); + }; + + //Core methods that'll be a part of every chart type + extend(Chart.Type.prototype,{ + initialize : function(){return this;}, + clear : function(){ + clear(this.chart); + return this; + }, + stop : function(){ + // Stops any current animation loop occuring + cancelAnimFrame(this.animationFrame); + return this; + }, + resize : function(callback){ + this.stop(); + var canvas = this.chart.canvas, + newWidth = getMaximumWidth(this.chart.canvas), + newHeight = this.options.maintainAspectRatio ? 
newWidth / this.chart.aspectRatio : getMaximumHeight(this.chart.canvas); + + canvas.width = this.chart.width = newWidth; + canvas.height = this.chart.height = newHeight; + + retinaScale(this.chart); + + if (typeof callback === "function"){ + callback.apply(this, Array.prototype.slice.call(arguments, 1)); + } + return this; + }, + reflow : noop, + render : function(reflow){ + if (reflow){ + this.reflow(); + } + if (this.options.animation && !reflow){ + helpers.animationLoop( + this.draw, + this.options.animationSteps, + this.options.animationEasing, + this.options.onAnimationProgress, + this.options.onAnimationComplete, + this + ); + } + else{ + this.draw(); + this.options.onAnimationComplete.call(this); + } + return this; + }, + generateLegend : function(){ + return template(this.options.legendTemplate,this); + }, + destroy : function(){ + this.clear(); + unbindEvents(this, this.events); + var canvas = this.chart.canvas; + + // Reset canvas height/width attributes starts a fresh with the canvas context + canvas.width = this.chart.width; + canvas.height = this.chart.height; + + // < IE9 doesn't support removeProperty + if (canvas.style.removeProperty) { + canvas.style.removeProperty('width'); + canvas.style.removeProperty('height'); + } else { + canvas.style.removeAttribute('width'); + canvas.style.removeAttribute('height'); + } + + delete Chart.instances[this.id]; + }, + showTooltip : function(ChartElements, forceRedraw){ + // Only redraw the chart if we've actually changed what we're hovering on. + if (typeof this.activeElements === 'undefined') this.activeElements = []; + + var isChanged = (function(Elements){ + var changed = false; + + if (Elements.length !== this.activeElements.length){ + changed = true; + return changed; + } + + each(Elements, function(element, index){ + if (element !== this.activeElements[index]){ + changed = true; + } + }, this); + return changed; + }).call(this, ChartElements); + + if (!isChanged && !forceRedraw){ + return; + } + else{ + this.activeElements = ChartElements; + } + this.draw(); + if(this.options.customTooltips){ + this.options.customTooltips(false); + } + if (ChartElements.length > 0){ + // If we have multiple datasets, show a MultiTooltip for all of the data points at that index + if (this.datasets && this.datasets.length > 1) { + var dataArray, + dataIndex; + + for (var i = this.datasets.length - 1; i >= 0; i--) { + dataArray = this.datasets[i].points || this.datasets[i].bars || this.datasets[i].segments; + dataIndex = indexOf(dataArray, ChartElements[0]); + if (dataIndex !== -1){ + break; + } + } + var tooltipLabels = [], + tooltipColors = [], + medianPosition = (function(index) { + + // Get all the points at that particular index + var Elements = [], + dataCollection, + xPositions = [], + yPositions = [], + xMax, + yMax, + xMin, + yMin; + helpers.each(this.datasets, function(dataset){ + dataCollection = dataset.points || dataset.bars || dataset.segments; + if (dataCollection[dataIndex] && dataCollection[dataIndex].hasValue()){ + Elements.push(dataCollection[dataIndex]); + } + }); + + helpers.each(Elements, function(element) { + xPositions.push(element.x); + yPositions.push(element.y); + + + //Include any colour information about the element + tooltipLabels.push(helpers.template(this.options.multiTooltipTemplate, element)); + tooltipColors.push({ + fill: element._saved.fillColor || element.fillColor, + stroke: element._saved.strokeColor || element.strokeColor + }); + + }, this); + + yMin = min(yPositions); + yMax = max(yPositions); + + xMin = 
min(xPositions); + xMax = max(xPositions); + + return { + x: (xMin > this.chart.width/2) ? xMin : xMax, + y: (yMin + yMax)/2 + }; + }).call(this, dataIndex); + + new Chart.MultiTooltip({ + x: medianPosition.x, + y: medianPosition.y, + xPadding: this.options.tooltipXPadding, + yPadding: this.options.tooltipYPadding, + xOffset: this.options.tooltipXOffset, + fillColor: this.options.tooltipFillColor, + textColor: this.options.tooltipFontColor, + fontFamily: this.options.tooltipFontFamily, + fontStyle: this.options.tooltipFontStyle, + fontSize: this.options.tooltipFontSize, + titleTextColor: this.options.tooltipTitleFontColor, + titleFontFamily: this.options.tooltipTitleFontFamily, + titleFontStyle: this.options.tooltipTitleFontStyle, + titleFontSize: this.options.tooltipTitleFontSize, + cornerRadius: this.options.tooltipCornerRadius, + labels: tooltipLabels, + legendColors: tooltipColors, + legendColorBackground : this.options.multiTooltipKeyBackground, + title: ChartElements[0].label, + chart: this.chart, + ctx: this.chart.ctx, + custom: this.options.customTooltips + }).draw(); + + } else { + each(ChartElements, function(Element) { + var tooltipPosition = Element.tooltipPosition(); + new Chart.Tooltip({ + x: Math.round(tooltipPosition.x), + y: Math.round(tooltipPosition.y), + xPadding: this.options.tooltipXPadding, + yPadding: this.options.tooltipYPadding, + fillColor: this.options.tooltipFillColor, + textColor: this.options.tooltipFontColor, + fontFamily: this.options.tooltipFontFamily, + fontStyle: this.options.tooltipFontStyle, + fontSize: this.options.tooltipFontSize, + caretHeight: this.options.tooltipCaretSize, + cornerRadius: this.options.tooltipCornerRadius, + text: template(this.options.tooltipTemplate, Element), + chart: this.chart, + custom: this.options.customTooltips + }).draw(); + }, this); + } + } + return this; + }, + toBase64Image : function(){ + return this.chart.canvas.toDataURL.apply(this.chart.canvas, arguments); + } + }); + + Chart.Type.extend = function(extensions){ + + var parent = this; + + var ChartType = function(){ + return parent.apply(this,arguments); + }; + + //Copy the prototype object of the this class + ChartType.prototype = clone(parent.prototype); + //Now overwrite some of the properties in the base class with the new extensions + extend(ChartType.prototype, extensions); + + ChartType.extend = Chart.Type.extend; + + if (extensions.name || parent.prototype.name){ + + var chartName = extensions.name || parent.prototype.name; + //Assign any potential default values of the new chart type + + //If none are defined, we'll use a clone of the chart type this is being extended from. + //I.e. if we extend a line chart, we'll use the defaults from the line chart if our new chart + //doesn't define some defaults of their own. + + var baseDefaults = (Chart.defaults[parent.prototype.name]) ? 
clone(Chart.defaults[parent.prototype.name]) : {}; + + Chart.defaults[chartName] = extend(baseDefaults,extensions.defaults); + + Chart.types[chartName] = ChartType; + + //Register this new chart type in the Chart prototype + Chart.prototype[chartName] = function(data,options){ + var config = merge(Chart.defaults.global, Chart.defaults[chartName], options || {}); + return new ChartType(data,config,this); + }; + } else{ + warn("Name not provided for this chart, so it hasn't been registered"); + } + return parent; + }; + + Chart.Element = function(configuration){ + extend(this,configuration); + this.initialize.apply(this,arguments); + this.save(); + }; + extend(Chart.Element.prototype,{ + initialize : function(){}, + restore : function(props){ + if (!props){ + extend(this,this._saved); + } else { + each(props,function(key){ + this[key] = this._saved[key]; + },this); + } + return this; + }, + save : function(){ + this._saved = clone(this); + delete this._saved._saved; + return this; + }, + update : function(newProps){ + each(newProps,function(value,key){ + this._saved[key] = this[key]; + this[key] = value; + },this); + return this; + }, + transition : function(props,ease){ + each(props,function(value,key){ + this[key] = ((value - this._saved[key]) * ease) + this._saved[key]; + },this); + return this; + }, + tooltipPosition : function(){ + return { + x : this.x, + y : this.y + }; + }, + hasValue: function(){ + return isNumber(this.value); + } + }); + + Chart.Element.extend = inherits; + + + Chart.Point = Chart.Element.extend({ + display: true, + inRange: function(chartX,chartY){ + var hitDetectionRange = this.hitDetectionRadius + this.radius; + return ((Math.pow(chartX-this.x, 2)+Math.pow(chartY-this.y, 2)) < Math.pow(hitDetectionRange,2)); + }, + draw : function(){ + if (this.display){ + var ctx = this.ctx; + ctx.beginPath(); + + ctx.arc(this.x, this.y, this.radius, 0, Math.PI*2); + ctx.closePath(); + + ctx.strokeStyle = this.strokeColor; + ctx.lineWidth = this.strokeWidth; + + ctx.fillStyle = this.fillColor; + + ctx.fill(); + ctx.stroke(); + } + + + //Quick debug for bezier curve splining + //Highlights control points and the line between them. + //Handy for dev - stripped in the min version. 
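
`Chart.Type.extend` above is the whole plugin mechanism: providing a `name` merges the type's defaults into `Chart.defaults` and registers a constructor on `Chart.prototype`. A sketch of the flow, using a made-up "Echo" type and a made-up `greeting` option:

```js
// Hypothetical "Echo" chart type, illustrating the registration described above.
Chart.Type.extend({
    name: 'Echo',
    defaults: { greeting: 'hello' },   // merged into Chart.defaults.Echo
    initialize: function (data) {
        this.data = data;
        this.render();                 // real types kick off drawing here too
    },
    draw: function () {
        console.log(this.options.greeting, this.data);
    }
});

// Because a name was provided, every Chart instance now has the constructor,
// with Chart.defaults.global and Chart.defaults.Echo merged into `options`:
// new Chart(ctx).Echo([1, 2, 3], { greeting: 'hi' });
```
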
+ + // ctx.save(); + // ctx.fillStyle = "black"; + // ctx.strokeStyle = "black" + // ctx.beginPath(); + // ctx.arc(this.controlPoints.inner.x,this.controlPoints.inner.y, 2, 0, Math.PI*2); + // ctx.fill(); + + // ctx.beginPath(); + // ctx.arc(this.controlPoints.outer.x,this.controlPoints.outer.y, 2, 0, Math.PI*2); + // ctx.fill(); + + // ctx.moveTo(this.controlPoints.inner.x,this.controlPoints.inner.y); + // ctx.lineTo(this.x, this.y); + // ctx.lineTo(this.controlPoints.outer.x,this.controlPoints.outer.y); + // ctx.stroke(); + + // ctx.restore(); + + + + } + }); + + Chart.Arc = Chart.Element.extend({ + inRange : function(chartX,chartY){ + + var pointRelativePosition = helpers.getAngleFromPoint(this, { + x: chartX, + y: chartY + }); + + //Check if within the range of the open/close angle + var betweenAngles = (pointRelativePosition.angle >= this.startAngle && pointRelativePosition.angle <= this.endAngle), + withinRadius = (pointRelativePosition.distance >= this.innerRadius && pointRelativePosition.distance <= this.outerRadius); + + return (betweenAngles && withinRadius); + //Ensure within the outside of the arc centre, but inside arc outer + }, + tooltipPosition : function(){ + var centreAngle = this.startAngle + ((this.endAngle - this.startAngle) / 2), + rangeFromCentre = (this.outerRadius - this.innerRadius) / 2 + this.innerRadius; + return { + x : this.x + (Math.cos(centreAngle) * rangeFromCentre), + y : this.y + (Math.sin(centreAngle) * rangeFromCentre) + }; + }, + draw : function(animationPercent){ + + var easingDecimal = animationPercent || 1; + + var ctx = this.ctx; + + ctx.beginPath(); + + ctx.arc(this.x, this.y, this.outerRadius, this.startAngle, this.endAngle); + + ctx.arc(this.x, this.y, this.innerRadius, this.endAngle, this.startAngle, true); + + ctx.closePath(); + ctx.strokeStyle = this.strokeColor; + ctx.lineWidth = this.strokeWidth; + + ctx.fillStyle = this.fillColor; + + ctx.fill(); + ctx.lineJoin = 'bevel'; + + if (this.showStroke){ + ctx.stroke(); + } + } + }); + + Chart.Rectangle = Chart.Element.extend({ + draw : function(){ + var ctx = this.ctx, + halfWidth = this.width/2, + leftX = this.x - halfWidth, + rightX = this.x + halfWidth, + top = this.base - (this.base - this.y), + halfStroke = this.strokeWidth / 2; + + // Canvas doesn't allow us to stroke inside the width so we can + // adjust the sizes to fit if we're setting a stroke on the line + if (this.showStroke){ + leftX += halfStroke; + rightX -= halfStroke; + top += halfStroke; + } + + ctx.beginPath(); + + ctx.fillStyle = this.fillColor; + ctx.strokeStyle = this.strokeColor; + ctx.lineWidth = this.strokeWidth; + + // It'd be nice to keep this class totally generic to any rectangle + // and simply specify which border to miss out. 
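
`Chart.Arc.tooltipPosition` above anchors the tooltip at the angular and radial midpoint of the segment. The same arithmetic, pulled out with arbitrary values so the geometry is easy to check:

```js
// Mid-arc anchor computed the same way as Chart.Arc.tooltipPosition above.
// All values here are arbitrary.
var arc = {
    x: 150, y: 150,             // centre of the chart
    startAngle: Math.PI * 1.5,  // segments start at the top in this codebase
    endAngle:   Math.PI * 2.0,
    innerRadius: 50, outerRadius: 100
};

var centreAngle = arc.startAngle + (arc.endAngle - arc.startAngle) / 2,
    midRadius   = (arc.outerRadius - arc.innerRadius) / 2 + arc.innerRadius;

var anchor = {
    x: arc.x + Math.cos(centreAngle) * midRadius,
    y: arc.y + Math.sin(centreAngle) * midRadius
};
// `anchor` is where a single-segment tooltip would be drawn for this arc.
```
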
+ ctx.moveTo(leftX, this.base); + ctx.lineTo(leftX, top); + ctx.lineTo(rightX, top); + ctx.lineTo(rightX, this.base); + ctx.fill(); + if (this.showStroke){ + ctx.stroke(); + } + }, + height : function(){ + return this.base - this.y; + }, + inRange : function(chartX,chartY){ + return (chartX >= this.x - this.width/2 && chartX <= this.x + this.width/2) && (chartY >= this.y && chartY <= this.base); + } + }); + + Chart.Tooltip = Chart.Element.extend({ + draw : function(){ + + var ctx = this.chart.ctx; + + ctx.font = fontString(this.fontSize,this.fontStyle,this.fontFamily); + + this.xAlign = "center"; + this.yAlign = "above"; + + //Distance between the actual element.y position and the start of the tooltip caret + var caretPadding = this.caretPadding = 2; + + var tooltipWidth = ctx.measureText(this.text).width + 2*this.xPadding, + tooltipRectHeight = this.fontSize + 2*this.yPadding, + tooltipHeight = tooltipRectHeight + this.caretHeight + caretPadding; + + if (this.x + tooltipWidth/2 >this.chart.width){ + this.xAlign = "left"; + } else if (this.x - tooltipWidth/2 < 0){ + this.xAlign = "right"; + } + + if (this.y - tooltipHeight < 0){ + this.yAlign = "below"; + } + + + var tooltipX = this.x - tooltipWidth/2, + tooltipY = this.y - tooltipHeight; + + ctx.fillStyle = this.fillColor; + + // Custom Tooltips + if(this.custom){ + this.custom(this); + } + else{ + switch(this.yAlign) + { + case "above": + //Draw a caret above the x/y + ctx.beginPath(); + ctx.moveTo(this.x,this.y - caretPadding); + ctx.lineTo(this.x + this.caretHeight, this.y - (caretPadding + this.caretHeight)); + ctx.lineTo(this.x - this.caretHeight, this.y - (caretPadding + this.caretHeight)); + ctx.closePath(); + ctx.fill(); + break; + case "below": + tooltipY = this.y + caretPadding + this.caretHeight; + //Draw a caret below the x/y + ctx.beginPath(); + ctx.moveTo(this.x, this.y + caretPadding); + ctx.lineTo(this.x + this.caretHeight, this.y + caretPadding + this.caretHeight); + ctx.lineTo(this.x - this.caretHeight, this.y + caretPadding + this.caretHeight); + ctx.closePath(); + ctx.fill(); + break; + } + + switch(this.xAlign) + { + case "left": + tooltipX = this.x - tooltipWidth + (this.cornerRadius + this.caretHeight); + break; + case "right": + tooltipX = this.x - (this.cornerRadius + this.caretHeight); + break; + } + + drawRoundedRectangle(ctx,tooltipX,tooltipY,tooltipWidth,tooltipRectHeight,this.cornerRadius); + + ctx.fill(); + + ctx.fillStyle = this.textColor; + ctx.textAlign = "center"; + ctx.textBaseline = "middle"; + ctx.fillText(this.text, tooltipX + tooltipWidth/2, tooltipY + tooltipRectHeight/2); + } + } + }); + + Chart.MultiTooltip = Chart.Element.extend({ + initialize : function(){ + this.font = fontString(this.fontSize,this.fontStyle,this.fontFamily); + + this.titleFont = fontString(this.titleFontSize,this.titleFontStyle,this.titleFontFamily); + + this.height = (this.labels.length * this.fontSize) + ((this.labels.length-1) * (this.fontSize/2)) + (this.yPadding*2) + this.titleFontSize *1.5; + + this.ctx.font = this.titleFont; + + var titleWidth = this.ctx.measureText(this.title).width, + //Label has a legend square as well so account for this. 
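
When the `customTooltips` hook threaded through `Chart.Tooltip.draw` above is set, no canvas drawing happens; the tooltip object is handed to the callback instead, and `showTooltip` calls the hook with `false` to signal a clear. A simplified sketch of a DOM-based handler; the `#chart-tooltip` element is hypothetical, and the positioning ignores the canvas's own page offset for brevity:

```js
// Sketch of a DOM tooltip via the customTooltips hook described above.
var options = {
    customTooltips: function (tooltip) {
        var el = document.getElementById('chart-tooltip'); // hypothetical element
        if (!tooltip) {             // called with false to hide the tooltip
            el.style.display = 'none';
            return;
        }
        el.style.display = 'block';
        el.style.left = tooltip.x + 'px';
        el.style.top  = tooltip.y + 'px';
        // Single tooltips carry .text; MultiTooltips carry .title and .labels.
        el.textContent = tooltip.text || (tooltip.labels || []).join(', ');
    }
};
```
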
+ labelWidth = longestText(this.ctx,this.font,this.labels) + this.fontSize + 3, + longestTextWidth = max([labelWidth,titleWidth]); + + this.width = longestTextWidth + (this.xPadding*2); + + + var halfHeight = this.height/2; + + //Check to ensure the height will fit on the canvas + if (this.y - halfHeight < 0 ){ + this.y = halfHeight; + } else if (this.y + halfHeight > this.chart.height){ + this.y = this.chart.height - halfHeight; + } + + //Decide whether to align left or right based on position on canvas + if (this.x > this.chart.width/2){ + this.x -= this.xOffset + this.width; + } else { + this.x += this.xOffset; + } + + + }, + getLineHeight : function(index){ + var baseLineHeight = this.y - (this.height/2) + this.yPadding, + afterTitleIndex = index-1; + + //If the index is zero, we're getting the title + if (index === 0){ + return baseLineHeight + this.titleFontSize/2; + } else{ + return baseLineHeight + ((this.fontSize*1.5*afterTitleIndex) + this.fontSize/2) + this.titleFontSize * 1.5; + } + + }, + draw : function(){ + // Custom Tooltips + if(this.custom){ + this.custom(this); + } + else{ + drawRoundedRectangle(this.ctx,this.x,this.y - this.height/2,this.width,this.height,this.cornerRadius); + var ctx = this.ctx; + ctx.fillStyle = this.fillColor; + ctx.fill(); + ctx.closePath(); + + ctx.textAlign = "left"; + ctx.textBaseline = "middle"; + ctx.fillStyle = this.titleTextColor; + ctx.font = this.titleFont; + + ctx.fillText(this.title,this.x + this.xPadding, this.getLineHeight(0)); + + ctx.font = this.font; + helpers.each(this.labels,function(label,index){ + ctx.fillStyle = this.textColor; + ctx.fillText(label,this.x + this.xPadding + this.fontSize + 3, this.getLineHeight(index + 1)); + + //A bit gnarly, but clearing this rectangle breaks when using explorercanvas (clears whole canvas) + //ctx.clearRect(this.x + this.xPadding, this.getLineHeight(index + 1) - this.fontSize/2, this.fontSize, this.fontSize); + //Instead we'll make a white filled block to put the legendColour palette over. + + ctx.fillStyle = this.legendColorBackground; + ctx.fillRect(this.x + this.xPadding, this.getLineHeight(index + 1) - this.fontSize/2, this.fontSize, this.fontSize); + + ctx.fillStyle = this.legendColors[index].fill; + ctx.fillRect(this.x + this.xPadding, this.getLineHeight(index + 1) - this.fontSize/2, this.fontSize, this.fontSize); + + + },this); + } + } + }); + + Chart.Scale = Chart.Element.extend({ + initialize : function(){ + this.fit(); + }, + buildYLabels : function(){ + this.yLabels = []; + + var stepDecimalPlaces = getDecimalPlaces(this.stepValue); + + for (var i=0; i<=this.steps; i++){ + this.yLabels.push(template(this.templateString,{value:(this.min + (i * this.stepValue)).toFixed(stepDecimalPlaces)})); + } + this.yLabelWidth = (this.display && this.showLabels) ? longestText(this.ctx,this.font,this.yLabels) : 0; + }, + addXLabel : function(label){ + this.xLabels.push(label); + this.valuesCount++; + this.fit(); + }, + removeXLabel : function(){ + this.xLabels.shift(); + this.valuesCount--; + this.fit(); + }, + // Fitting loop to rotate x Labels and figure out what fits there, and also calculate how many Y steps to use + fit: function(){ + // First we need the width of the yLabels, assuming the xLabels aren't rotated + + // To do that we need the base line at the top and base of the chart, assuming there is no x label rotation + this.startPoint = (this.display) ? this.fontSize : 0; + this.endPoint = (this.display) ? 
this.height - (this.fontSize * 1.5) - 5 : this.height; // -5 to pad labels + + // Apply padding settings to the start and end point. + this.startPoint += this.padding; + this.endPoint -= this.padding; + + // Cache the starting height, so can determine if we need to recalculate the scale yAxis + var cachedHeight = this.endPoint - this.startPoint, + cachedYLabelWidth; + + // Build the current yLabels so we have an idea of what size they'll be to start + /* + * This sets what is returned from calculateScaleRange as static properties of this class: + * + this.steps; + this.stepValue; + this.min; + this.max; + * + */ + this.calculateYRange(cachedHeight); + + // With these properties set we can now build the array of yLabels + // and also the width of the largest yLabel + this.buildYLabels(); + + this.calculateXLabelRotation(); + + while((cachedHeight > this.endPoint - this.startPoint)){ + cachedHeight = this.endPoint - this.startPoint; + cachedYLabelWidth = this.yLabelWidth; + + this.calculateYRange(cachedHeight); + this.buildYLabels(); + + // Only go through the xLabel loop again if the yLabel width has changed + if (cachedYLabelWidth < this.yLabelWidth){ + this.calculateXLabelRotation(); + } + } + + }, + calculateXLabelRotation : function(){ + //Get the width of each grid by calculating the difference + //between x offsets between 0 and 1. + + this.ctx.font = this.font; + + var firstWidth = this.ctx.measureText(this.xLabels[0]).width, + lastWidth = this.ctx.measureText(this.xLabels[this.xLabels.length - 1]).width, + firstRotated, + lastRotated; + + + this.xScalePaddingRight = lastWidth/2 + 3; + this.xScalePaddingLeft = (firstWidth/2 > this.yLabelWidth + 10) ? firstWidth/2 : this.yLabelWidth + 10; + + this.xLabelRotation = 0; + if (this.display){ + var originalLabelWidth = longestText(this.ctx,this.font,this.xLabels), + cosRotation, + firstRotatedWidth; + this.xLabelWidth = originalLabelWidth; + //Allow 3 pixels x2 padding either side for label readability + var xGridWidth = Math.floor(this.calculateX(1) - this.calculateX(0)) - 6; + + //Max label rotate should be 90 - also act as a loop counter + while ((this.xLabelWidth > xGridWidth && this.xLabelRotation === 0) || (this.xLabelWidth > xGridWidth && this.xLabelRotation <= 90 && this.xLabelRotation > 0)){ + cosRotation = Math.cos(toRadians(this.xLabelRotation)); + + firstRotated = cosRotation * firstWidth; + lastRotated = cosRotation * lastWidth; + + // We're right aligning the text now. + if (firstRotated + this.fontSize / 2 > this.yLabelWidth + 8){ + this.xScalePaddingLeft = firstRotated + this.fontSize / 2; + } + this.xScalePaddingRight = this.fontSize/2; + + + this.xLabelRotation++; + this.xLabelWidth = cosRotation * originalLabelWidth; + + } + if (this.xLabelRotation > 0){ + this.endPoint -= Math.sin(toRadians(this.xLabelRotation))*originalLabelWidth + 3; + } + } + else{ + this.xLabelWidth = 0; + this.xScalePaddingRight = this.padding; + this.xScalePaddingLeft = this.padding; + } + + }, + // Needs to be overidden in each Chart type + // Otherwise we need to pass all the data into the scale class + calculateYRange: noop, + drawingArea: function(){ + return this.startPoint - this.endPoint; + }, + calculateY : function(value){ + var scalingFactor = this.drawingArea() / (this.min - this.max); + return this.endPoint - (scalingFactor * (value - this.min)); + }, + calculateX : function(index){ + var isRotated = (this.xLabelRotation > 0), + // innerWidth = (this.offsetGridLines) ? 
this.width - offsetLeft - this.padding : this.width - (offsetLeft + halfLabelWidth * 2) - this.padding, + innerWidth = this.width - (this.xScalePaddingLeft + this.xScalePaddingRight), + valueWidth = innerWidth/Math.max((this.valuesCount - ((this.offsetGridLines) ? 0 : 1)), 1), + valueOffset = (valueWidth * index) + this.xScalePaddingLeft; + + if (this.offsetGridLines){ + valueOffset += (valueWidth/2); + } + + return Math.round(valueOffset); + }, + update : function(newProps){ + helpers.extend(this, newProps); + this.fit(); + }, + draw : function(){ + var ctx = this.ctx, + yLabelGap = (this.endPoint - this.startPoint) / this.steps, + xStart = Math.round(this.xScalePaddingLeft); + if (this.display){ + ctx.fillStyle = this.textColor; + ctx.font = this.font; + each(this.yLabels,function(labelString,index){ + var yLabelCenter = this.endPoint - (yLabelGap * index), + linePositionY = Math.round(yLabelCenter), + drawHorizontalLine = this.showHorizontalLines; + + ctx.textAlign = "right"; + ctx.textBaseline = "middle"; + if (this.showLabels){ + ctx.fillText(labelString,xStart - 10,yLabelCenter); + } + + // This is X axis, so draw it + if (index === 0 && !drawHorizontalLine){ + drawHorizontalLine = true; + } + + if (drawHorizontalLine){ + ctx.beginPath(); + } + + if (index > 0){ + // This is a grid line in the centre, so drop that + ctx.lineWidth = this.gridLineWidth; + ctx.strokeStyle = this.gridLineColor; + } else { + // This is the first line on the scale + ctx.lineWidth = this.lineWidth; + ctx.strokeStyle = this.lineColor; + } + + linePositionY += helpers.aliasPixel(ctx.lineWidth); + + if(drawHorizontalLine){ + ctx.moveTo(xStart, linePositionY); + ctx.lineTo(this.width, linePositionY); + ctx.stroke(); + ctx.closePath(); + } + + ctx.lineWidth = this.lineWidth; + ctx.strokeStyle = this.lineColor; + ctx.beginPath(); + ctx.moveTo(xStart - 5, linePositionY); + ctx.lineTo(xStart, linePositionY); + ctx.stroke(); + ctx.closePath(); + + },this); + + each(this.xLabels,function(label,index){ + var xPos = this.calculateX(index) + aliasPixel(this.lineWidth), + // Check to see if line/bar here and decide where to place the line + linePos = this.calculateX(index - (this.offsetGridLines ? 0.5 : 0)) + aliasPixel(this.lineWidth), + isRotated = (this.xLabelRotation > 0), + drawVerticalLine = this.showVerticalLines; + + // This is Y axis, so draw it + if (index === 0 && !drawVerticalLine){ + drawVerticalLine = true; + } + + if (drawVerticalLine){ + ctx.beginPath(); + } + + if (index > 0){ + // This is a grid line in the centre, so drop that + ctx.lineWidth = this.gridLineWidth; + ctx.strokeStyle = this.gridLineColor; + } else { + // This is the first line on the scale + ctx.lineWidth = this.lineWidth; + ctx.strokeStyle = this.lineColor; + } + + if (drawVerticalLine){ + ctx.moveTo(linePos,this.endPoint); + ctx.lineTo(linePos,this.startPoint - 3); + ctx.stroke(); + ctx.closePath(); + } + + + ctx.lineWidth = this.lineWidth; + ctx.strokeStyle = this.lineColor; + + + // Small lines at the bottom of the base grid line + ctx.beginPath(); + ctx.moveTo(linePos,this.endPoint); + ctx.lineTo(linePos,this.endPoint + 5); + ctx.stroke(); + ctx.closePath(); + + ctx.save(); + ctx.translate(xPos,(isRotated) ? this.endPoint + 12 : this.endPoint + 8); + ctx.rotate(toRadians(this.xLabelRotation)*-1); + ctx.font = this.font; + ctx.textAlign = (isRotated) ? "right" : "center"; + ctx.textBaseline = (isRotated) ? 
"middle" : "top"; + ctx.fillText(label, 0, 0); + ctx.restore(); + },this); + + } + } + + }); + + Chart.RadialScale = Chart.Element.extend({ + initialize: function(){ + this.size = min([this.height, this.width]); + this.drawingArea = (this.display) ? (this.size/2) - (this.fontSize/2 + this.backdropPaddingY) : (this.size/2); + }, + calculateCenterOffset: function(value){ + // Take into account half font size + the yPadding of the top value + var scalingFactor = this.drawingArea / (this.max - this.min); + + return (value - this.min) * scalingFactor; + }, + update : function(){ + if (!this.lineArc){ + this.setScaleSize(); + } else { + this.drawingArea = (this.display) ? (this.size/2) - (this.fontSize/2 + this.backdropPaddingY) : (this.size/2); + } + this.buildYLabels(); + }, + buildYLabels: function(){ + this.yLabels = []; + + var stepDecimalPlaces = getDecimalPlaces(this.stepValue); + + for (var i=0; i<=this.steps; i++){ + this.yLabels.push(template(this.templateString,{value:(this.min + (i * this.stepValue)).toFixed(stepDecimalPlaces)})); + } + }, + getCircumference : function(){ + return ((Math.PI*2) / this.valuesCount); + }, + setScaleSize: function(){ + /* + * Right, this is really confusing and there is a lot of maths going on here + * The gist of the problem is here: https://gist.github.com/nnnick/696cc9c55f4b0beb8fe9 + * + * Reaction: https://dl.dropboxusercontent.com/u/34601363/toomuchscience.gif + * + * Solution: + * + * We assume the radius of the polygon is half the size of the canvas at first + * at each index we check if the text overlaps. + * + * Where it does, we store that angle and that index. + * + * After finding the largest index and angle we calculate how much we need to remove + * from the shape radius to move the point inwards by that x. + * + * We average the left and right distances to get the maximum shape radius that can fit in the box + * along with labels. + * + * Once we have that, we can find the centre point for the chart, by taking the x text protrusion + * on each side, removing that from the size, halving it and adding the left x protrusion width. + * + * This will mean we have a shape fitted to the canvas, as large as it can be with the labels + * and position it in the most space efficient manner + * + * https://dl.dropboxusercontent.com/u/34601363/yeahscience.gif + */ + + + // Get maximum radius of the polygon. Either half the height (minus the text width) or half the width. + // Use this to calculate the offset + change. 
- Make sure L/R protrusion is at least 0 to stop issues with centre points
+ var largestPossibleRadius = min([(this.height/2 - this.pointLabelFontSize - 5), this.width/2]),
+     pointPosition,
+     i,
+     textWidth,
+     halfTextWidth,
+     furthestRight = this.width,
+     furthestRightIndex,
+     furthestRightAngle,
+     furthestLeft = 0,
+     furthestLeftIndex,
+     furthestLeftAngle,
+     xProtrusionLeft,
+     xProtrusionRight,
+     radiusReductionRight,
+     radiusReductionLeft,
+     maxWidthRadius;
+ this.ctx.font = fontString(this.pointLabelFontSize,this.pointLabelFontStyle,this.pointLabelFontFamily);
+ for (i=0;i<this.valuesCount;i++){
+     // 5px to space the text slightly out - similar to what we do in the draw function.
+     pointPosition = this.getPointPosition(i, largestPossibleRadius);
+     textWidth = this.ctx.measureText(template(this.templateString, { value: this.labels[i] })).width + 5;
+     if (i === 0 || i === this.valuesCount/2){
+         // If we're at index zero, or exactly the middle, we're at exactly the top/bottom
+         // of the chart, so the text will be aligned centrally; half it and compare
+         // with the left and right text sizes
+         halfTextWidth = textWidth/2;
+         if (pointPosition.x + halfTextWidth > furthestRight) {
+             furthestRight = pointPosition.x + halfTextWidth;
+             furthestRightIndex = i;
+         }
+         if (pointPosition.x - halfTextWidth < furthestLeft) {
+             furthestLeft = pointPosition.x - halfTextWidth;
+             furthestLeftIndex = i;
+         }
+     }
+     else if (i < this.valuesCount/2) {
+         // Less than half the values means we'll left align the text
+         if (pointPosition.x + textWidth > furthestRight) {
+             furthestRight = pointPosition.x + textWidth;
+             furthestRightIndex = i;
+         }
+     }
+     else if (i > this.valuesCount/2){
+         // More than half the values means we'll right align the text
+         if (pointPosition.x - textWidth < furthestLeft) {
+             furthestLeft = pointPosition.x - textWidth;
+             furthestLeftIndex = i;
+         }
+     }
+ }
+
+ xProtrusionLeft = furthestLeft;
+
+ xProtrusionRight = Math.ceil(furthestRight - this.width);
+
+ furthestRightAngle = this.getIndexAngle(furthestRightIndex);
+
+ furthestLeftAngle = this.getIndexAngle(furthestLeftIndex);
+
+ radiusReductionRight = xProtrusionRight / Math.sin(furthestRightAngle + Math.PI/2);
+
+ radiusReductionLeft = xProtrusionLeft / Math.sin(furthestLeftAngle + Math.PI/2);
+
+ // Ensure we actually need to reduce the size of the chart
+ radiusReductionRight = (isNumber(radiusReductionRight)) ? radiusReductionRight : 0;
+ radiusReductionLeft = (isNumber(radiusReductionLeft)) ? radiusReductionLeft : 0;
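
The two `radiusReduction` values just computed turn a horizontal label overflow into a change of polygon radius. A worked sketch with made-up numbers (the angle is arbitrary; `getIndexAngle` is defined just below):

```js
// A label protrudes 18px past the right edge, at an index angle of about -26 degrees.
var xProtrusionRight   = 18,
    furthestRightAngle = -Math.PI / 7;

// Moving a point at angle a inwards by r changes its x by r*cos(a),
// and cos(a) === sin(a + PI/2), which is the form the code above uses:
var radiusReductionRight = xProtrusionRight / Math.sin(furthestRightAngle + Math.PI / 2);
// ~= 20px must come off the radius for that label to fit inside the canvas.
```
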
+
+ this.drawingArea = largestPossibleRadius - (radiusReductionLeft + radiusReductionRight)/2;
+
+ //this.drawingArea = min([maxWidthRadius, (this.height - (2 * (this.pointLabelFontSize + 5)))/2])
+ this.setCenterPoint(radiusReductionLeft, radiusReductionRight);
+
+ },
+ setCenterPoint: function(leftMovement, rightMovement){
+
+     var maxRight = this.width - rightMovement - this.drawingArea,
+         maxLeft = leftMovement + this.drawingArea;
+
+     this.xCenter = (maxLeft + maxRight)/2;
+     // Always vertically in the centre as the text height doesn't change
+     this.yCenter = (this.height/2);
+ },
+
+ getIndexAngle : function(index){
+     var angleMultiplier = (Math.PI * 2) / this.valuesCount;
+     // Start from the top instead of right, so remove a quarter of the circle
+
+     return index * angleMultiplier - (Math.PI/2);
+ },
+ getPointPosition : function(index, distanceFromCenter){
+     var thisAngle = this.getIndexAngle(index);
+     return {
+         x : (Math.cos(thisAngle) * distanceFromCenter) + this.xCenter,
+         y : (Math.sin(thisAngle) * distanceFromCenter) + this.yCenter
+     };
+ },
+ draw: function(){
+     if (this.display){
+         var ctx = this.ctx;
+         each(this.yLabels, function(label, index){
+             // Don't draw a centre value
+             if (index > 0){
+                 var yCenterOffset = index * (this.drawingArea/this.steps),
+                     yHeight = this.yCenter - yCenterOffset,
+                     pointPosition;
+
+                 // Draw circular lines around the scale
+                 if (this.lineWidth > 0){
+                     ctx.strokeStyle = this.lineColor;
+                     ctx.lineWidth = this.lineWidth;
+
+                     if(this.lineArc){
+                         ctx.beginPath();
+                         ctx.arc(this.xCenter, this.yCenter, yCenterOffset, 0, Math.PI*2);
+                         ctx.closePath();
+                         ctx.stroke();
+                     } else{
+                         ctx.beginPath();
+                         for (var i=0;i<this.valuesCount;i++){
+                             pointPosition = this.getPointPosition(i, this.calculateCenterOffset(this.min + (index * this.stepValue)));
+                             if (i === 0){
+                                 ctx.moveTo(pointPosition.x, pointPosition.y);
+                             } else{
+                                 ctx.lineTo(pointPosition.x, pointPosition.y);
+                             }
+                         }
+                         ctx.closePath();
+                         ctx.stroke();
+                     }
+                 }
+                 if(this.showLabels){
+                     ctx.font = fontString(this.fontSize,this.fontStyle,this.fontFamily);
+                     if (this.showLabelBackdrop){
+                         var labelWidth = ctx.measureText(label).width;
+                         ctx.fillStyle = this.backdropColor;
+                         ctx.fillRect(
+                             this.xCenter - labelWidth/2 - this.backdropPaddingX,
+                             yHeight - this.fontSize/2 - this.backdropPaddingY,
+                             labelWidth + this.backdropPaddingX*2,
+                             this.fontSize + this.backdropPaddingY*2
+                         );
+                     }
+                     ctx.textAlign = 'center';
+                     ctx.textBaseline = "middle";
+                     ctx.fillStyle = this.fontColor;
+                     ctx.fillText(label, this.xCenter, yHeight);
+                 }
+             }
+         }, this);
+
+         if (!this.lineArc){
+             ctx.lineWidth = this.angleLineWidth;
+             ctx.strokeStyle = this.angleLineColor;
+             for (var i = this.valuesCount - 1; i >= 0; i--) {
+                 if (this.angleLineWidth > 0){
+                     var outerPosition = this.getPointPosition(i, this.calculateCenterOffset(this.max));
+                     ctx.beginPath();
+                     ctx.moveTo(this.xCenter, this.yCenter);
+                     ctx.lineTo(outerPosition.x, outerPosition.y);
+                     ctx.stroke();
+                     ctx.closePath();
+                 }
+                 // Extra 3px out for some label spacing
+                 var pointLabelPosition = this.getPointPosition(i, this.calculateCenterOffset(this.max) + 5);
+                 ctx.font = fontString(this.pointLabelFontSize,this.pointLabelFontStyle,this.pointLabelFontFamily);
+                 ctx.fillStyle = this.pointLabelFontColor;
+
+                 var labelsCount = this.labels.length,
+                     halfLabelsCount = this.labels.length/2,
+                     quarterLabelsCount = halfLabelsCount/2,
+                     upperHalf = (i < quarterLabelsCount || i > labelsCount - quarterLabelsCount),
+                     exactQuarter = (i === quarterLabelsCount || i === labelsCount - quarterLabelsCount);
+                 if (i === 0){
+                     ctx.textAlign = 'center';
+                 } else if(i === halfLabelsCount){
+                     ctx.textAlign = 'center';
+                 } else if (i < halfLabelsCount){
+                     ctx.textAlign = 'left';
+                 } else {
+                     ctx.textAlign = 'right';
+                 }
+
+                 // Set the correct text baseline based on outer positioning
+                 if (exactQuarter){
+                     ctx.textBaseline = 'middle';
+                 } else if (upperHalf){
+                     ctx.textBaseline = 'bottom';
+                 } else {
+                     ctx.textBaseline = 'top';
+                 }
+
+                 ctx.fillText(this.labels[i], pointLabelPosition.x, pointLabelPosition.y);
+             }
+         }
+     }
+ }
+ });
+
+ // Attach global event to resize each chart instance when the browser resizes
+ helpers.addEvent(window, "resize", (function(){
+     // Basic debounce of resize function so it doesn't hurt performance when resizing browser.
+ var timeout; + return function(){ + clearTimeout(timeout); + timeout = setTimeout(function(){ + each(Chart.instances,function(instance){ + // If the responsive flag is set in the chart instance config + // Cascade the resize event down to the chart. + if (instance.options.responsive){ + instance.resize(instance.render, true); + } + }); + }, 50); + }; + })()); + + + if (amd) { + define(function(){ + return Chart; + }); + } else if (typeof module === 'object' && module.exports) { + module.exports = Chart; + } + + root.Chart = Chart; + + Chart.noConflict = function(){ + root.Chart = previous; + return Chart; + }; + +}).call(this); + +(function(){ + "use strict"; + + var root = this, + Chart = root.Chart, + helpers = Chart.helpers; + + + var defaultConfig = { + //Boolean - Whether the scale should start at zero, or an order of magnitude down from the lowest value + scaleBeginAtZero : true, + + //Boolean - Whether grid lines are shown across the chart + scaleShowGridLines : true, + + //String - Colour of the grid lines + scaleGridLineColor : "rgba(0,0,0,.05)", + + //Number - Width of the grid lines + scaleGridLineWidth : 1, + + //Boolean - Whether to show horizontal lines (except X axis) + scaleShowHorizontalLines: true, + + //Boolean - Whether to show vertical lines (except Y axis) + scaleShowVerticalLines: true, + + //Boolean - If there is a stroke on each bar + barShowStroke : true, + + //Number - Pixel width of the bar stroke + barStrokeWidth : 2, + + //Number - Spacing between each of the X value sets + barValueSpacing : 5, + + //Number - Spacing between data sets within X values + barDatasetSpacing : 1, + + //String - A legend template + legendTemplate : "
<ul class=\"<%=name.toLowerCase()%>-legend\"><% for (var i=0; i<datasets.length; i++){%><li><span style=\"background-color:<%=datasets[i].fillColor%>\"></span><%if(datasets[i].label){%><%=datasets[i].label%><%}%></li><%}%></ul>
" + + }; + + + Chart.Type.extend({ + name: "Bar", + defaults : defaultConfig, + initialize: function(data){ + + //Expose options as a scope variable here so we can access it in the ScaleClass + var options = this.options; + + this.ScaleClass = Chart.Scale.extend({ + offsetGridLines : true, + calculateBarX : function(datasetCount, datasetIndex, barIndex){ + //Reusable method for calculating the xPosition of a given bar based on datasetIndex & width of the bar + var xWidth = this.calculateBaseWidth(), + xAbsolute = this.calculateX(barIndex) - (xWidth/2), + barWidth = this.calculateBarWidth(datasetCount); + + return xAbsolute + (barWidth * datasetIndex) + (datasetIndex * options.barDatasetSpacing) + barWidth/2; + }, + calculateBaseWidth : function(){ + return (this.calculateX(1) - this.calculateX(0)) - (2*options.barValueSpacing); + }, + calculateBarWidth : function(datasetCount){ + //The padding between datasets is to the right of each bar, providing that there are more than 1 dataset + var baseWidth = this.calculateBaseWidth() - ((datasetCount - 1) * options.barDatasetSpacing); + + return (baseWidth / datasetCount); + } + }); + + this.datasets = []; + + //Set up tooltip events on the chart + if (this.options.showTooltips){ + helpers.bindEvents(this, this.options.tooltipEvents, function(evt){ + var activeBars = (evt.type !== 'mouseout') ? this.getBarsAtEvent(evt) : []; + + this.eachBars(function(bar){ + bar.restore(['fillColor', 'strokeColor']); + }); + helpers.each(activeBars, function(activeBar){ + activeBar.fillColor = activeBar.highlightFill; + activeBar.strokeColor = activeBar.highlightStroke; + }); + this.showTooltip(activeBars); + }); + } + + //Declare the extension of the default point, to cater for the options passed in to the constructor + this.BarClass = Chart.Rectangle.extend({ + strokeWidth : this.options.barStrokeWidth, + showStroke : this.options.barShowStroke, + ctx : this.chart.ctx + }); + + //Iterate through each of the datasets, and build this into a property of the chart + helpers.each(data.datasets,function(dataset,datasetIndex){ + + var datasetObject = { + label : dataset.label || null, + fillColor : dataset.fillColor, + strokeColor : dataset.strokeColor, + bars : [] + }; + + this.datasets.push(datasetObject); + + helpers.each(dataset.data,function(dataPoint,index){ + //Add a new point for each piece of data, passing any required data to draw. + datasetObject.bars.push(new this.BarClass({ + value : dataPoint, + label : data.labels[index], + datasetLabel: dataset.label, + strokeColor : dataset.strokeColor, + fillColor : dataset.fillColor, + highlightFill : dataset.highlightFill || dataset.fillColor, + highlightStroke : dataset.highlightStroke || dataset.strokeColor + })); + },this); + + },this); + + this.buildScale(data.labels); + + this.BarClass.prototype.base = this.scale.endPoint; + + this.eachBars(function(bar, index, datasetIndex){ + helpers.extend(bar, { + width : this.scale.calculateBarWidth(this.datasets.length), + x: this.scale.calculateBarX(this.datasets.length, datasetIndex, index), + y: this.scale.endPoint + }); + bar.save(); + }, this); + + this.render(); + }, + update : function(){ + this.scale.update(); + // Reset any highlight colours before updating. 
+ helpers.each(this.activeElements, function(activeElement){ + activeElement.restore(['fillColor', 'strokeColor']); + }); + + this.eachBars(function(bar){ + bar.save(); + }); + this.render(); + }, + eachBars : function(callback){ + helpers.each(this.datasets,function(dataset, datasetIndex){ + helpers.each(dataset.bars, callback, this, datasetIndex); + },this); + }, + getBarsAtEvent : function(e){ + var barsArray = [], + eventPosition = helpers.getRelativePosition(e), + datasetIterator = function(dataset){ + barsArray.push(dataset.bars[barIndex]); + }, + barIndex; + + for (var datasetIndex = 0; datasetIndex < this.datasets.length; datasetIndex++) { + for (barIndex = 0; barIndex < this.datasets[datasetIndex].bars.length; barIndex++) { + if (this.datasets[datasetIndex].bars[barIndex].inRange(eventPosition.x,eventPosition.y)){ + helpers.each(this.datasets, datasetIterator); + return barsArray; + } + } + } + + return barsArray; + }, + buildScale : function(labels){ + var self = this; + + var dataTotal = function(){ + var values = []; + self.eachBars(function(bar){ + values.push(bar.value); + }); + return values; + }; + + var scaleOptions = { + templateString : this.options.scaleLabel, + height : this.chart.height, + width : this.chart.width, + ctx : this.chart.ctx, + textColor : this.options.scaleFontColor, + fontSize : this.options.scaleFontSize, + fontStyle : this.options.scaleFontStyle, + fontFamily : this.options.scaleFontFamily, + valuesCount : labels.length, + beginAtZero : this.options.scaleBeginAtZero, + integersOnly : this.options.scaleIntegersOnly, + calculateYRange: function(currentHeight){ + var updatedRanges = helpers.calculateScaleRange( + dataTotal(), + currentHeight, + this.fontSize, + this.beginAtZero, + this.integersOnly + ); + helpers.extend(this, updatedRanges); + }, + xLabels : labels, + font : helpers.fontString(this.options.scaleFontSize, this.options.scaleFontStyle, this.options.scaleFontFamily), + lineWidth : this.options.scaleLineWidth, + lineColor : this.options.scaleLineColor, + showHorizontalLines : this.options.scaleShowHorizontalLines, + showVerticalLines : this.options.scaleShowVerticalLines, + gridLineWidth : (this.options.scaleShowGridLines) ? this.options.scaleGridLineWidth : 0, + gridLineColor : (this.options.scaleShowGridLines) ? this.options.scaleGridLineColor : "rgba(0,0,0,0)", + padding : (this.options.showScale) ? 0 : (this.options.barShowStroke) ? this.options.barStrokeWidth : 0, + showLabels : this.options.scaleShowLabels, + display : this.options.showScale + }; + + if (this.options.scaleOverride){ + helpers.extend(scaleOptions, { + calculateYRange: helpers.noop, + steps: this.options.scaleSteps, + stepValue: this.options.scaleStepWidth, + min: this.options.scaleStartValue, + max: this.options.scaleStartValue + (this.options.scaleSteps * this.options.scaleStepWidth) + }); + } + + this.scale = new this.ScaleClass(scaleOptions); + }, + addData : function(valuesArray,label){ + //Map the values array for each of the datasets + helpers.each(valuesArray,function(value,datasetIndex){ + //Add a new point for each piece of data, passing any required data to draw. 
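
When `scaleOverride` is set, the branch above replaces `calculateYRange` with a no-op and takes a fixed scale straight from the options. A sketch of the corresponding configuration (the values are arbitrary):

```js
// Fix the y-axis at 0..100 in steps of 20 instead of auto-fitting to the data.
var options = {
    scaleOverride: true,
    scaleSteps: 5,        // number of steps on the scale
    scaleStepWidth: 20,   // value covered by each step
    scaleStartValue: 0    // scale minimum; max becomes 0 + 5 * 20 = 100
};
// new Chart(ctx).Bar(barData, options);
```
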
+ this.datasets[datasetIndex].bars.push(new this.BarClass({ + value : value, + label : label, + x: this.scale.calculateBarX(this.datasets.length, datasetIndex, this.scale.valuesCount+1), + y: this.scale.endPoint, + width : this.scale.calculateBarWidth(this.datasets.length), + base : this.scale.endPoint, + strokeColor : this.datasets[datasetIndex].strokeColor, + fillColor : this.datasets[datasetIndex].fillColor + })); + },this); + + this.scale.addXLabel(label); + //Then re-render the chart. + this.update(); + }, + removeData : function(){ + this.scale.removeXLabel(); + //Then re-render the chart. + helpers.each(this.datasets,function(dataset){ + dataset.bars.shift(); + },this); + this.update(); + }, + reflow : function(){ + helpers.extend(this.BarClass.prototype,{ + y: this.scale.endPoint, + base : this.scale.endPoint + }); + var newScaleProps = helpers.extend({ + height : this.chart.height, + width : this.chart.width + }); + this.scale.update(newScaleProps); + }, + draw : function(ease){ + var easingDecimal = ease || 1; + this.clear(); + + var ctx = this.chart.ctx; + + this.scale.draw(easingDecimal); + + //Draw all the bars for each dataset + helpers.each(this.datasets,function(dataset,datasetIndex){ + helpers.each(dataset.bars,function(bar,index){ + if (bar.hasValue()){ + bar.base = this.scale.endPoint; + //Transition then draw + bar.transition({ + x : this.scale.calculateBarX(this.datasets.length, datasetIndex, index), + y : this.scale.calculateY(bar.value), + width : this.scale.calculateBarWidth(this.datasets.length) + }, easingDecimal).draw(); + } + },this); + + },this); + } + }); + + +}).call(this); + +(function(){ + "use strict"; + + var root = this, + Chart = root.Chart, + //Cache a local reference to Chart.helpers + helpers = Chart.helpers; + + var defaultConfig = { + //Boolean - Whether we should show a stroke on each segment + segmentShowStroke : true, + + //String - The colour of each segment stroke + segmentStrokeColor : "#fff", + + //Number - The width of each segment stroke + segmentStrokeWidth : 2, + + //The percentage of the chart that we cut out of the middle. + percentageInnerCutout : 50, + + //Number - Amount of animation steps + animationSteps : 100, + + //String - Animation easing effect + animationEasing : "easeOutBounce", + + //Boolean - Whether we animate the rotation of the Doughnut + animateRotate : true, + + //Boolean - Whether we animate scaling the Doughnut from the centre + animateScale : false, + + //String - A legend template + legendTemplate : "" + + }; + + + Chart.Type.extend({ + //Passing in a name registers this chart in the Chart namespace + name: "Doughnut", + //Providing a defaults will also register the deafults in the chart namespace + defaults : defaultConfig, + //Initialize is fired when the chart is initialized - Data is passed in as a parameter + //Config is automatically merged by the core of Chart.js, and is available at this.options + initialize: function(data){ + + //Declare segments as a static property to prevent inheriting across the Chart type prototype + this.segments = []; + this.outerRadius = (helpers.min([this.chart.width,this.chart.height]) - this.options.segmentStrokeWidth/2)/2; + + this.SegmentArc = Chart.Arc.extend({ + ctx : this.chart.ctx, + x : this.chart.width/2, + y : this.chart.height/2 + }); + + //Set up tooltip events on the chart + if (this.options.showTooltips){ + helpers.bindEvents(this, this.options.tooltipEvents, function(evt){ + var activeSegments = (evt.type !== 'mouseout') ? 
this.getSegmentsAtEvent(evt) : []; + + helpers.each(this.segments,function(segment){ + segment.restore(["fillColor"]); + }); + helpers.each(activeSegments,function(activeSegment){ + activeSegment.fillColor = activeSegment.highlightColor; + }); + this.showTooltip(activeSegments); + }); + } + this.calculateTotal(data); + + helpers.each(data,function(datapoint, index){ + this.addData(datapoint, index, true); + },this); + + this.render(); + }, + getSegmentsAtEvent : function(e){ + var segmentsArray = []; + + var location = helpers.getRelativePosition(e); + + helpers.each(this.segments,function(segment){ + if (segment.inRange(location.x,location.y)) segmentsArray.push(segment); + },this); + return segmentsArray; + }, + addData : function(segment, atIndex, silent){ + var index = atIndex || this.segments.length; + this.segments.splice(index, 0, new this.SegmentArc({ + value : segment.value, + outerRadius : (this.options.animateScale) ? 0 : this.outerRadius, + innerRadius : (this.options.animateScale) ? 0 : (this.outerRadius/100) * this.options.percentageInnerCutout, + fillColor : segment.color, + highlightColor : segment.highlight || segment.color, + showStroke : this.options.segmentShowStroke, + strokeWidth : this.options.segmentStrokeWidth, + strokeColor : this.options.segmentStrokeColor, + startAngle : Math.PI * 1.5, + circumference : (this.options.animateRotate) ? 0 : this.calculateCircumference(segment.value), + label : segment.label + })); + if (!silent){ + this.reflow(); + this.update(); + } + }, + calculateCircumference : function(value){ + return (Math.PI*2)*(Math.abs(value) / this.total); + }, + calculateTotal : function(data){ + this.total = 0; + helpers.each(data,function(segment){ + this.total += Math.abs(segment.value); + },this); + }, + update : function(){ + this.calculateTotal(this.segments); + + // Reset any highlight colours before updating. + helpers.each(this.activeElements, function(activeElement){ + activeElement.restore(['fillColor']); + }); + + helpers.each(this.segments,function(segment){ + segment.save(); + }); + this.render(); + }, + + removeData: function(atIndex){ + var indexToDelete = (helpers.isNumber(atIndex)) ? atIndex : this.segments.length-1; + this.segments.splice(indexToDelete, 1); + this.reflow(); + this.update(); + }, + + reflow : function(){ + helpers.extend(this.SegmentArc.prototype,{ + x : this.chart.width/2, + y : this.chart.height/2 + }); + this.outerRadius = (helpers.min([this.chart.width,this.chart.height]) - this.options.segmentStrokeWidth/2)/2; + helpers.each(this.segments, function(segment){ + segment.update({ + outerRadius : this.outerRadius, + innerRadius : (this.outerRadius/100) * this.options.percentageInnerCutout + }); + }, this); + }, + draw : function(easeDecimal){ + var animDecimal = (easeDecimal) ? 
easeDecimal : 1; + this.clear(); + helpers.each(this.segments,function(segment,index){ + segment.transition({ + circumference : this.calculateCircumference(segment.value), + outerRadius : this.outerRadius, + innerRadius : (this.outerRadius/100) * this.options.percentageInnerCutout + },animDecimal); + + segment.endAngle = segment.startAngle + segment.circumference; + + segment.draw(); + if (index === 0){ + segment.startAngle = Math.PI * 1.5; + } + //Check to see if it's the last segment, if not get the next and update the start angle + if (index < this.segments.length-1){ + this.segments[index+1].startAngle = segment.endAngle; + } + },this); + + } + }); + + Chart.types.Doughnut.extend({ + name : "Pie", + defaults : helpers.merge(defaultConfig,{percentageInnerCutout : 0}) + }); + +}).call(this); +(function(){ + "use strict"; + + var root = this, + Chart = root.Chart, + helpers = Chart.helpers; + + var defaultConfig = { + + ///Boolean - Whether grid lines are shown across the chart + scaleShowGridLines : true, + + //String - Colour of the grid lines + scaleGridLineColor : "rgba(0,0,0,.05)", + + //Number - Width of the grid lines + scaleGridLineWidth : 1, + + //Boolean - Whether to show horizontal lines (except X axis) + scaleShowHorizontalLines: true, + + //Boolean - Whether to show vertical lines (except Y axis) + scaleShowVerticalLines: true, + + //Boolean - Whether the line is curved between points + bezierCurve : true, + + //Number - Tension of the bezier curve between points + bezierCurveTension : 0.4, + + //Boolean - Whether to show a dot for each point + pointDot : true, + + //Number - Radius of each point dot in pixels + pointDotRadius : 4, + + //Number - Pixel width of point dot stroke + pointDotStrokeWidth : 1, + + //Number - amount extra to add to the radius to cater for hit detection outside the drawn point + pointHitDetectionRadius : 20, + + //Boolean - Whether to show a stroke for datasets + datasetStroke : true, + + //Number - Pixel width of dataset stroke + datasetStrokeWidth : 2, + + //Boolean - Whether to fill the dataset with a colour + datasetFill : true, + + //String - A legend template + legendTemplate : "" + + }; + + + Chart.Type.extend({ + name: "Line", + defaults : defaultConfig, + initialize: function(data){ + //Declare the extension of the default point, to cater for the options passed in to the constructor + this.PointClass = Chart.Point.extend({ + strokeWidth : this.options.pointDotStrokeWidth, + radius : this.options.pointDotRadius, + display: this.options.pointDot, + hitDetectionRadius : this.options.pointHitDetectionRadius, + ctx : this.chart.ctx, + inRange : function(mouseX){ + return (Math.pow(mouseX-this.x, 2) < Math.pow(this.radius + this.hitDetectionRadius,2)); + } + }); + + this.datasets = []; + + //Set up tooltip events on the chart + if (this.options.showTooltips){ + helpers.bindEvents(this, this.options.tooltipEvents, function(evt){ + var activePoints = (evt.type !== 'mouseout') ? 
this.getPointsAtEvent(evt) : []; + this.eachPoints(function(point){ + point.restore(['fillColor', 'strokeColor']); + }); + helpers.each(activePoints, function(activePoint){ + activePoint.fillColor = activePoint.highlightFill; + activePoint.strokeColor = activePoint.highlightStroke; + }); + this.showTooltip(activePoints); + }); + } + + //Iterate through each of the datasets, and build this into a property of the chart + helpers.each(data.datasets,function(dataset){ + + var datasetObject = { + label : dataset.label || null, + fillColor : dataset.fillColor, + strokeColor : dataset.strokeColor, + pointColor : dataset.pointColor, + pointStrokeColor : dataset.pointStrokeColor, + points : [] + }; + + this.datasets.push(datasetObject); + + + helpers.each(dataset.data,function(dataPoint,index){ + //Add a new point for each piece of data, passing any required data to draw. + datasetObject.points.push(new this.PointClass({ + value : dataPoint, + label : data.labels[index], + datasetLabel: dataset.label, + strokeColor : dataset.pointStrokeColor, + fillColor : dataset.pointColor, + highlightFill : dataset.pointHighlightFill || dataset.pointColor, + highlightStroke : dataset.pointHighlightStroke || dataset.pointStrokeColor + })); + },this); + + this.buildScale(data.labels); + + + this.eachPoints(function(point, index){ + helpers.extend(point, { + x: this.scale.calculateX(index), + y: this.scale.endPoint + }); + point.save(); + }, this); + + },this); + + + this.render(); + }, + update : function(){ + this.scale.update(); + // Reset any highlight colours before updating. + helpers.each(this.activeElements, function(activeElement){ + activeElement.restore(['fillColor', 'strokeColor']); + }); + this.eachPoints(function(point){ + point.save(); + }); + this.render(); + }, + eachPoints : function(callback){ + helpers.each(this.datasets,function(dataset){ + helpers.each(dataset.points,callback,this); + },this); + }, + getPointsAtEvent : function(e){ + var pointsArray = [], + eventPosition = helpers.getRelativePosition(e); + helpers.each(this.datasets,function(dataset){ + helpers.each(dataset.points,function(point){ + if (point.inRange(eventPosition.x,eventPosition.y)) pointsArray.push(point); + }); + },this); + return pointsArray; + }, + buildScale : function(labels){ + var self = this; + + var dataTotal = function(){ + var values = []; + self.eachPoints(function(point){ + values.push(point.value); + }); + + return values; + }; + + var scaleOptions = { + templateString : this.options.scaleLabel, + height : this.chart.height, + width : this.chart.width, + ctx : this.chart.ctx, + textColor : this.options.scaleFontColor, + fontSize : this.options.scaleFontSize, + fontStyle : this.options.scaleFontStyle, + fontFamily : this.options.scaleFontFamily, + valuesCount : labels.length, + beginAtZero : this.options.scaleBeginAtZero, + integersOnly : this.options.scaleIntegersOnly, + calculateYRange : function(currentHeight){ + var updatedRanges = helpers.calculateScaleRange( + dataTotal(), + currentHeight, + this.fontSize, + this.beginAtZero, + this.integersOnly + ); + helpers.extend(this, updatedRanges); + }, + xLabels : labels, + font : helpers.fontString(this.options.scaleFontSize, this.options.scaleFontStyle, this.options.scaleFontFamily), + lineWidth : this.options.scaleLineWidth, + lineColor : this.options.scaleLineColor, + showHorizontalLines : this.options.scaleShowHorizontalLines, + showVerticalLines : this.options.scaleShowVerticalLines, + gridLineWidth : (this.options.scaleShowGridLines) ? 
this.options.scaleGridLineWidth : 0, + gridLineColor : (this.options.scaleShowGridLines) ? this.options.scaleGridLineColor : "rgba(0,0,0,0)", + padding: (this.options.showScale) ? 0 : this.options.pointDotRadius + this.options.pointDotStrokeWidth, + showLabels : this.options.scaleShowLabels, + display : this.options.showScale + }; + + if (this.options.scaleOverride){ + helpers.extend(scaleOptions, { + calculateYRange: helpers.noop, + steps: this.options.scaleSteps, + stepValue: this.options.scaleStepWidth, + min: this.options.scaleStartValue, + max: this.options.scaleStartValue + (this.options.scaleSteps * this.options.scaleStepWidth) + }); + } + + + this.scale = new Chart.Scale(scaleOptions); + }, + addData : function(valuesArray,label){ + //Map the values array for each of the datasets + + helpers.each(valuesArray,function(value,datasetIndex){ + //Add a new point for each piece of data, passing any required data to draw. + this.datasets[datasetIndex].points.push(new this.PointClass({ + value : value, + label : label, + x: this.scale.calculateX(this.scale.valuesCount+1), + y: this.scale.endPoint, + strokeColor : this.datasets[datasetIndex].pointStrokeColor, + fillColor : this.datasets[datasetIndex].pointColor + })); + },this); + + this.scale.addXLabel(label); + //Then re-render the chart. + this.update(); + }, + removeData : function(){ + this.scale.removeXLabel(); + //Then re-render the chart. + helpers.each(this.datasets,function(dataset){ + dataset.points.shift(); + },this); + this.update(); + }, + reflow : function(){ + var newScaleProps = helpers.extend({ + height : this.chart.height, + width : this.chart.width + }); + this.scale.update(newScaleProps); + }, + draw : function(ease){ + var easingDecimal = ease || 1; + this.clear(); + + var ctx = this.chart.ctx; + + // Some helper methods for getting the next/prev points + var hasValue = function(item){ + return item.value !== null; + }, + nextPoint = function(point, collection, index){ + return helpers.findNextWhere(collection, hasValue, index) || point; + }, + previousPoint = function(point, collection, index){ + return helpers.findPreviousWhere(collection, hasValue, index) || point; + }; + + this.scale.draw(easingDecimal); + + + helpers.each(this.datasets,function(dataset){ + var pointsWithValues = helpers.where(dataset.points, hasValue); + + //Transition each point first so that the line and point drawing isn't out of sync + //We can use this extra loop to calculate the control points of this dataset also in this loop + + helpers.each(dataset.points, function(point, index){ + if (point.hasValue()){ + point.transition({ + y : this.scale.calculateY(point.value), + x : this.scale.calculateX(index) + }, easingDecimal); + } + },this); + + + // Control points need to be calculated in a separate loop, because we need to know the current x/y of the point + // This would cause issues when there is no animation, because the y of the next point would be 0, so beziers would be skewed + if (this.options.bezierCurve){ + helpers.each(pointsWithValues, function(point, index){ + var tension = (index > 0 && index < pointsWithValues.length - 1) ?
this.options.bezierCurveTension : 0; + point.controlPoints = helpers.splineCurve( + previousPoint(point, pointsWithValues, index), + point, + nextPoint(point, pointsWithValues, index), + tension + ); + + // Prevent the bezier going outside of the bounds of the graph + + // Cap outer bezier handles to the upper/lower scale bounds + if (point.controlPoints.outer.y > this.scale.endPoint){ + point.controlPoints.outer.y = this.scale.endPoint; + } + else if (point.controlPoints.outer.y < this.scale.startPoint){ + point.controlPoints.outer.y = this.scale.startPoint; + } + + // Cap inner bezier handles to the upper/lower scale bounds + if (point.controlPoints.inner.y > this.scale.endPoint){ + point.controlPoints.inner.y = this.scale.endPoint; + } + else if (point.controlPoints.inner.y < this.scale.startPoint){ + point.controlPoints.inner.y = this.scale.startPoint; + } + },this); + } + + + //Draw the line between all the points + ctx.lineWidth = this.options.datasetStrokeWidth; + ctx.strokeStyle = dataset.strokeColor; + ctx.beginPath(); + + helpers.each(pointsWithValues, function(point, index){ + if (index === 0){ + ctx.moveTo(point.x, point.y); + } + else{ + if(this.options.bezierCurve){ + var previous = previousPoint(point, pointsWithValues, index); + + ctx.bezierCurveTo( + previous.controlPoints.outer.x, + previous.controlPoints.outer.y, + point.controlPoints.inner.x, + point.controlPoints.inner.y, + point.x, + point.y + ); + } + else{ + ctx.lineTo(point.x,point.y); + } + } + }, this); + + ctx.stroke(); + + if (this.options.datasetFill && pointsWithValues.length > 0){ + //Round off the line by going to the base of the chart, back to the start, then fill. + ctx.lineTo(pointsWithValues[pointsWithValues.length - 1].x, this.scale.endPoint); + ctx.lineTo(pointsWithValues[0].x, this.scale.endPoint); + ctx.fillStyle = dataset.fillColor; + ctx.closePath(); + ctx.fill(); + } + + //Now draw the points over the line + //A little inefficient double looping, but better than the line + //lagging behind the point positions + helpers.each(pointsWithValues,function(point){ + point.draw(); + }); + },this); + } + }); + + +}).call(this); + +(function(){ + "use strict"; + + var root = this, + Chart = root.Chart, + //Cache a local reference to Chart.helpers + helpers = Chart.helpers; + + var defaultConfig = { + //Boolean - Show a backdrop to the scale label + scaleShowLabelBackdrop : true, + + //String - The colour of the label backdrop + scaleBackdropColor : "rgba(255,255,255,0.75)", + + // Boolean - Whether the scale should begin at zero + scaleBeginAtZero : true, + + //Number - The backdrop padding above & below the label in pixels + scaleBackdropPaddingY : 2, + + //Number - The backdrop padding to the side of the label in pixels + scaleBackdropPaddingX : 2, + + //Boolean - Show line for each value in the scale + scaleShowLine : true, + + //Boolean - Stroke a line around each segment in the chart + segmentShowStroke : true, + + //String - The colour of the stroke on each segment. + segmentStrokeColor : "#fff", + + //Number - The width of the stroke value in pixels + segmentStrokeWidth : 2, + + //Number - Amount of animation steps + animationSteps : 100, + + //String - Animation easing effect.
+ animationEasing : "easeOutBounce", + + //Boolean - Whether to animate the rotation of the chart + animateRotate : true, + + //Boolean - Whether to animate scaling the chart from the centre + animateScale : false, + + //String - A legend template + legendTemplate : "" + }; + + + Chart.Type.extend({ + //Passing in a name registers this chart in the Chart namespace + name: "PolarArea", + //Providing defaults will also register the defaults in the chart namespace + defaults : defaultConfig, + //Initialize is fired when the chart is initialized - Data is passed in as a parameter + //Config is automatically merged by the core of Chart.js, and is available at this.options + initialize: function(data){ + this.segments = []; + //Declare segment class as a chart instance specific class, so it can share props for this instance + this.SegmentArc = Chart.Arc.extend({ + showStroke : this.options.segmentShowStroke, + strokeWidth : this.options.segmentStrokeWidth, + strokeColor : this.options.segmentStrokeColor, + ctx : this.chart.ctx, + innerRadius : 0, + x : this.chart.width/2, + y : this.chart.height/2 + }); + this.scale = new Chart.RadialScale({ + display: this.options.showScale, + fontStyle: this.options.scaleFontStyle, + fontSize: this.options.scaleFontSize, + fontFamily: this.options.scaleFontFamily, + fontColor: this.options.scaleFontColor, + showLabels: this.options.scaleShowLabels, + showLabelBackdrop: this.options.scaleShowLabelBackdrop, + backdropColor: this.options.scaleBackdropColor, + backdropPaddingY : this.options.scaleBackdropPaddingY, + backdropPaddingX: this.options.scaleBackdropPaddingX, + lineWidth: (this.options.scaleShowLine) ? this.options.scaleLineWidth : 0, + lineColor: this.options.scaleLineColor, + lineArc: true, + width: this.chart.width, + height: this.chart.height, + xCenter: this.chart.width/2, + yCenter: this.chart.height/2, + ctx : this.chart.ctx, + templateString: this.options.scaleLabel, + valuesCount: data.length + }); + + this.updateScaleRange(data); + + this.scale.update(); + + helpers.each(data,function(segment,index){ + this.addData(segment,index,true); + },this); + + //Set up tooltip events on the chart + if (this.options.showTooltips){ + helpers.bindEvents(this, this.options.tooltipEvents, function(evt){ + var activeSegments = (evt.type !== 'mouseout') ? this.getSegmentsAtEvent(evt) : []; + helpers.each(this.segments,function(segment){ + segment.restore(["fillColor"]); + }); + helpers.each(activeSegments,function(activeSegment){ + activeSegment.fillColor = activeSegment.highlightColor; + }); + this.showTooltip(activeSegments); + }); + } + + this.render(); + }, + getSegmentsAtEvent : function(e){ + var segmentsArray = []; + + var location = helpers.getRelativePosition(e); + + helpers.each(this.segments,function(segment){ + if (segment.inRange(location.x,location.y)) segmentsArray.push(segment); + },this); + return segmentsArray; + }, + addData : function(segment, atIndex, silent){ + var index = atIndex || this.segments.length; + + this.segments.splice(index, 0, new this.SegmentArc({ + fillColor: segment.color, + highlightColor: segment.highlight || segment.color, + label: segment.label, + value: segment.value, + outerRadius: (this.options.animateScale) ? 0 : this.scale.calculateCenterOffset(segment.value), + circumference: (this.options.animateRotate) ?
0 : this.scale.getCircumference(), + startAngle: Math.PI * 1.5 + })); + if (!silent){ + this.reflow(); + this.update(); + } + }, + removeData: function(atIndex){ + var indexToDelete = (helpers.isNumber(atIndex)) ? atIndex : this.segments.length-1; + this.segments.splice(indexToDelete, 1); + this.reflow(); + this.update(); + }, + calculateTotal: function(data){ + this.total = 0; + helpers.each(data,function(segment){ + this.total += segment.value; + },this); + this.scale.valuesCount = this.segments.length; + }, + updateScaleRange: function(datapoints){ + var valuesArray = []; + helpers.each(datapoints,function(segment){ + valuesArray.push(segment.value); + }); + + var scaleSizes = (this.options.scaleOverride) ? + { + steps: this.options.scaleSteps, + stepValue: this.options.scaleStepWidth, + min: this.options.scaleStartValue, + max: this.options.scaleStartValue + (this.options.scaleSteps * this.options.scaleStepWidth) + } : + helpers.calculateScaleRange( + valuesArray, + helpers.min([this.chart.width, this.chart.height])/2, + this.options.scaleFontSize, + this.options.scaleBeginAtZero, + this.options.scaleIntegersOnly + ); + + helpers.extend( + this.scale, + scaleSizes, + { + size: helpers.min([this.chart.width, this.chart.height]), + xCenter: this.chart.width/2, + yCenter: this.chart.height/2 + } + ); + + }, + update : function(){ + this.calculateTotal(this.segments); + + helpers.each(this.segments,function(segment){ + segment.save(); + }); + + this.reflow(); + this.render(); + }, + reflow : function(){ + helpers.extend(this.SegmentArc.prototype,{ + x : this.chart.width/2, + y : this.chart.height/2 + }); + this.updateScaleRange(this.segments); + this.scale.update(); + + helpers.extend(this.scale,{ + xCenter: this.chart.width/2, + yCenter: this.chart.height/2 + }); + + helpers.each(this.segments, function(segment){ + segment.update({ + outerRadius : this.scale.calculateCenterOffset(segment.value) + }); + }, this); + + }, + draw : function(ease){ + var easingDecimal = ease || 1; + //Clear & draw the canvas + this.clear(); + helpers.each(this.segments,function(segment, index){ + segment.transition({ + circumference : this.scale.getCircumference(), + outerRadius : this.scale.calculateCenterOffset(segment.value) + },easingDecimal); + + segment.endAngle = segment.startAngle + segment.circumference; + + // If we've removed the first segment we need to set the first one to + // start at the top. 
+ if (index === 0){ + segment.startAngle = Math.PI * 1.5; + } + + //Check to see if it's the last segment, if not get the next and update the start angle + if (index < this.segments.length - 1){ + this.segments[index+1].startAngle = segment.endAngle; + } + segment.draw(); + }, this); + this.scale.draw(); + } + }); + +}).call(this); +(function(){ + "use strict"; + + var root = this, + Chart = root.Chart, + helpers = Chart.helpers; + + + + Chart.Type.extend({ + name: "Radar", + defaults:{ + //Boolean - Whether to show lines for each scale point + scaleShowLine : true, + + //Boolean - Whether we show the angle lines out of the radar + angleShowLineOut : true, + + //Boolean - Whether to show labels on the scale + scaleShowLabels : false, + + // Boolean - Whether the scale should begin at zero + scaleBeginAtZero : true, + + //String - Colour of the angle line + angleLineColor : "rgba(0,0,0,.1)", + + //Number - Pixel width of the angle line + angleLineWidth : 1, + + //String - Point label font declaration + pointLabelFontFamily : "'Arial'", + + //String - Point label font weight + pointLabelFontStyle : "normal", + + //Number - Point label font size in pixels + pointLabelFontSize : 10, + + //String - Point label font colour + pointLabelFontColor : "#666", + + //Boolean - Whether to show a dot for each point + pointDot : true, + + //Number - Radius of each point dot in pixels + pointDotRadius : 3, + + //Number - Pixel width of point dot stroke + pointDotStrokeWidth : 1, + + //Number - amount extra to add to the radius to cater for hit detection outside the drawn point + pointHitDetectionRadius : 20, + + //Boolean - Whether to show a stroke for datasets + datasetStroke : true, + + //Number - Pixel width of dataset stroke + datasetStrokeWidth : 2, + + //Boolean - Whether to fill the dataset with a colour + datasetFill : true, + + //String - A legend template + legendTemplate : "" + + }, + + initialize: function(data){ + this.PointClass = Chart.Point.extend({ + strokeWidth : this.options.pointDotStrokeWidth, + radius : this.options.pointDotRadius, + display: this.options.pointDot, + hitDetectionRadius : this.options.pointHitDetectionRadius, + ctx : this.chart.ctx + }); + + this.datasets = []; + + this.buildScale(data); + + //Set up tooltip events on the chart + if (this.options.showTooltips){ + helpers.bindEvents(this, this.options.tooltipEvents, function(evt){ + var activePointsCollection = (evt.type !== 'mouseout') ? this.getPointsAtEvent(evt) : []; + + this.eachPoints(function(point){ + point.restore(['fillColor', 'strokeColor']); + }); + helpers.each(activePointsCollection, function(activePoint){ + activePoint.fillColor = activePoint.highlightFill; + activePoint.strokeColor = activePoint.highlightStroke; + }); + + this.showTooltip(activePointsCollection); + }); + } + + //Iterate through each of the datasets, and build this into a property of the chart + helpers.each(data.datasets,function(dataset){ + + var datasetObject = { + label: dataset.label || null, + fillColor : dataset.fillColor, + strokeColor : dataset.strokeColor, + pointColor : dataset.pointColor, + pointStrokeColor : dataset.pointStrokeColor, + points : [] + }; + + this.datasets.push(datasetObject); + + helpers.each(dataset.data,function(dataPoint,index){ + //Add a new point for each piece of data, passing any required data to draw. 
+ var pointPosition; + if (!this.scale.animation){ + pointPosition = this.scale.getPointPosition(index, this.scale.calculateCenterOffset(dataPoint)); + } + datasetObject.points.push(new this.PointClass({ + value : dataPoint, + label : data.labels[index], + datasetLabel: dataset.label, + x: (this.options.animation) ? this.scale.xCenter : pointPosition.x, + y: (this.options.animation) ? this.scale.yCenter : pointPosition.y, + strokeColor : dataset.pointStrokeColor, + fillColor : dataset.pointColor, + highlightFill : dataset.pointHighlightFill || dataset.pointColor, + highlightStroke : dataset.pointHighlightStroke || dataset.pointStrokeColor + })); + },this); + + },this); + + this.render(); + }, + eachPoints : function(callback){ + helpers.each(this.datasets,function(dataset){ + helpers.each(dataset.points,callback,this); + },this); + }, + + getPointsAtEvent : function(evt){ + var mousePosition = helpers.getRelativePosition(evt), + fromCenter = helpers.getAngleFromPoint({ + x: this.scale.xCenter, + y: this.scale.yCenter + }, mousePosition); + + var anglePerIndex = (Math.PI * 2) /this.scale.valuesCount, + pointIndex = Math.round((fromCenter.angle - Math.PI * 1.5) / anglePerIndex), + activePointsCollection = []; + + // If we're at the top, make the pointIndex 0 to get the first of the array. + if (pointIndex >= this.scale.valuesCount || pointIndex < 0){ + pointIndex = 0; + } + + if (fromCenter.distance <= this.scale.drawingArea){ + helpers.each(this.datasets, function(dataset){ + activePointsCollection.push(dataset.points[pointIndex]); + }); + } + + return activePointsCollection; + }, + + buildScale : function(data){ + this.scale = new Chart.RadialScale({ + display: this.options.showScale, + fontStyle: this.options.scaleFontStyle, + fontSize: this.options.scaleFontSize, + fontFamily: this.options.scaleFontFamily, + fontColor: this.options.scaleFontColor, + showLabels: this.options.scaleShowLabels, + showLabelBackdrop: this.options.scaleShowLabelBackdrop, + backdropColor: this.options.scaleBackdropColor, + backdropPaddingY : this.options.scaleBackdropPaddingY, + backdropPaddingX: this.options.scaleBackdropPaddingX, + lineWidth: (this.options.scaleShowLine) ? this.options.scaleLineWidth : 0, + lineColor: this.options.scaleLineColor, + angleLineColor : this.options.angleLineColor, + angleLineWidth : (this.options.angleShowLineOut) ? this.options.angleLineWidth : 0, + // Point labels at the edge of each line + pointLabelFontColor : this.options.pointLabelFontColor, + pointLabelFontSize : this.options.pointLabelFontSize, + pointLabelFontFamily : this.options.pointLabelFontFamily, + pointLabelFontStyle : this.options.pointLabelFontStyle, + height : this.chart.height, + width: this.chart.width, + xCenter: this.chart.width/2, + yCenter: this.chart.height/2, + ctx : this.chart.ctx, + templateString: this.options.scaleLabel, + labels: data.labels, + valuesCount: data.datasets[0].data.length + }); + + this.scale.setScaleSize(); + this.updateScaleRange(data.datasets); + this.scale.buildYLabels(); + }, + updateScaleRange: function(datasets){ + var valuesArray = (function(){ + var totalDataArray = []; + helpers.each(datasets,function(dataset){ + if (dataset.data){ + totalDataArray = totalDataArray.concat(dataset.data); + } + else { + helpers.each(dataset.points, function(point){ + totalDataArray.push(point.value); + }); + } + }); + return totalDataArray; + })(); + + + var scaleSizes = (this.options.scaleOverride) ? 
+ { + steps: this.options.scaleSteps, + stepValue: this.options.scaleStepWidth, + min: this.options.scaleStartValue, + max: this.options.scaleStartValue + (this.options.scaleSteps * this.options.scaleStepWidth) + } : + helpers.calculateScaleRange( + valuesArray, + helpers.min([this.chart.width, this.chart.height])/2, + this.options.scaleFontSize, + this.options.scaleBeginAtZero, + this.options.scaleIntegersOnly + ); + + helpers.extend( + this.scale, + scaleSizes + ); + + }, + addData : function(valuesArray,label){ + //Map the values array for each of the datasets + this.scale.valuesCount++; + helpers.each(valuesArray,function(value,datasetIndex){ + var pointPosition = this.scale.getPointPosition(this.scale.valuesCount, this.scale.calculateCenterOffset(value)); + this.datasets[datasetIndex].points.push(new this.PointClass({ + value : value, + label : label, + x: pointPosition.x, + y: pointPosition.y, + strokeColor : this.datasets[datasetIndex].pointStrokeColor, + fillColor : this.datasets[datasetIndex].pointColor + })); + },this); + + this.scale.labels.push(label); + + this.reflow(); + + this.update(); + }, + removeData : function(){ + this.scale.valuesCount--; + this.scale.labels.shift(); + helpers.each(this.datasets,function(dataset){ + dataset.points.shift(); + },this); + this.reflow(); + this.update(); + }, + update : function(){ + this.eachPoints(function(point){ + point.save(); + }); + this.reflow(); + this.render(); + }, + reflow: function(){ + helpers.extend(this.scale, { + width : this.chart.width, + height: this.chart.height, + size : helpers.min([this.chart.width, this.chart.height]), + xCenter: this.chart.width/2, + yCenter: this.chart.height/2 + }); + this.updateScaleRange(this.datasets); + this.scale.setScaleSize(); + this.scale.buildYLabels(); + }, + draw : function(ease){ + var easeDecimal = ease || 1, + ctx = this.chart.ctx; + this.clear(); + this.scale.draw(); + + helpers.each(this.datasets,function(dataset){ + + //Transition each point first so that the line and point drawing isn't out of sync + helpers.each(dataset.points,function(point,index){ + if (point.hasValue()){ + point.transition(this.scale.getPointPosition(index, this.scale.calculateCenterOffset(point.value)), easeDecimal); + } + },this); + + + + //Draw the line between all the points + ctx.lineWidth = this.options.datasetStrokeWidth; + ctx.strokeStyle = dataset.strokeColor; + ctx.beginPath(); + helpers.each(dataset.points,function(point,index){ + if (index === 0){ + ctx.moveTo(point.x,point.y); + } + else{ + ctx.lineTo(point.x,point.y); + } + },this); + ctx.closePath(); + ctx.stroke(); + + ctx.fillStyle = dataset.fillColor; + ctx.fill(); + + //Now draw the points over the line + //A little inefficient double looping, but better than the line + //lagging behind the point positions + helpers.each(dataset.points,function(point){ + if (point.hasValue()){ + point.draw(); + } + }); + + },this); + + } + + }); + + + + + +}).call(this); diff --git a/perfdash/www/angular-chart.css b/perfdash/www/angular-chart.css new file mode 100644 index 000000000000..e6598ee41b4f --- /dev/null +++ b/perfdash/www/angular-chart.css @@ -0,0 +1,49 @@ +.chart-legend, +.bar-legend, +.line-legend, +.pie-legend, +.radar-legend, +.polararea-legend, +.doughnut-legend { + list-style-type: none; + margin-top: 5px; + text-align: center; + /* NOTE: Browsers automatically add 40px of padding-left to all lists, so we should offset that, otherwise the legend is off-center */ + -webkit-padding-start: 0; + /* Webkit */ + -moz-padding-start: 0; + /* 
Mozilla */ + padding-left: 0; + /* IE (handles all cases, really, but we should also include the vendor-specific properties just to be safe) */ +} +.chart-legend li, +.bar-legend li, +.line-legend li, +.pie-legend li, +.radar-legend li, +.polararea-legend li, +.doughnut-legend li { + display: inline-block; + white-space: nowrap; + position: relative; + margin-bottom: 4px; + border-radius: 5px; + padding: 2px 8px 2px 28px; + font-size: smaller; + cursor: default; +} +.chart-legend li span, +.bar-legend li span, +.line-legend li span, +.pie-legend li span, +.radar-legend li span, +.polararea-legend li span, +.doughnut-legend li span { + display: block; + position: absolute; + left: 0; + top: 0; + width: 20px; + height: 20px; + border-radius: 5px; +} diff --git a/perfdash/www/angular-chart.js b/perfdash/www/angular-chart.js new file mode 100644 index 000000000000..c7a59c50c94d --- /dev/null +++ b/perfdash/www/angular-chart.js @@ -0,0 +1,359 @@ +(function (factory) { + 'use strict'; + if (typeof exports === 'object') { + // Node/CommonJS + module.exports = factory(require('angular'), require('chart.js')); + } else if (typeof define === 'function' && define.amd) { + // AMD. Register as an anonymous module. + define(['angular', 'chart'], factory); + } else { + // Browser globals + factory(angular, Chart); + } +}(function (angular, Chart) { + 'use strict'; + + Chart.defaults.global.responsive = true; + Chart.defaults.global.multiTooltipTemplate = '<%if (datasetLabel){%><%=datasetLabel%>: <%}%><%= value %>'; + Chart.defaults.global.colours = [ + '#97BBCD', // blue + '#4D5360', // dark grey + '#F7464A', // red + '#46BFBD', // green + '#FDB45C', // yellow + '#949FB1', // grey + '#DCDCDC', // light grey + ]; + + var usingExcanvas = typeof window.G_vmlCanvasManager === 'object' && + window.G_vmlCanvasManager !== null && + typeof window.G_vmlCanvasManager.initElement === 'function'; + + if (usingExcanvas) Chart.defaults.global.animation = false; + + return angular.module('chart.js', []) + .provider('ChartJs', ChartJsProvider) + .factory('ChartJsFactory', ['ChartJs', '$timeout', ChartJsFactory]) + .directive('chartBase', ['ChartJsFactory', function (ChartJsFactory) { return new ChartJsFactory(); }]) + .directive('chartLine', ['ChartJsFactory', function (ChartJsFactory) { return new ChartJsFactory('Line'); }]) + .directive('chartBar', ['ChartJsFactory', function (ChartJsFactory) { return new ChartJsFactory('Bar'); }]) + .directive('chartRadar', ['ChartJsFactory', function (ChartJsFactory) { return new ChartJsFactory('Radar'); }]) + .directive('chartDoughnut', ['ChartJsFactory', function (ChartJsFactory) { return new ChartJsFactory('Doughnut'); }]) + .directive('chartPie', ['ChartJsFactory', function (ChartJsFactory) { return new ChartJsFactory('Pie'); }]) + .directive('chartPolarArea', ['ChartJsFactory', function (ChartJsFactory) { return new ChartJsFactory('PolarArea'); }]); + + /** + * Wrapper for chart.js + * Allows configuring chart js using the provider + * + * angular.module('myModule', ['chart.js']).config(function(ChartJsProvider) { + * ChartJsProvider.setOptions({ responsive: true }); + * ChartJsProvider.setOptions('Line', { responsive: false }); + * }))) + */ + function ChartJsProvider () { + var options = {}; + var ChartJs = { + Chart: Chart, + getOptions: function (type) { + var typeOptions = type && options[type] || {}; + return angular.extend({}, options, typeOptions); + } + }; + + /** + * Allow to set global options during configuration + */ + this.setOptions = function (type, 
customOptions) { + // If no type was specified set option for the global object + if (! customOptions) { + customOptions = type; + options = angular.extend(options, customOptions); + return; + } + // Set options for the specific chart + options[type] = angular.extend(options[type] || {}, customOptions); + }; + + this.$get = function () { + return ChartJs; + }; + } + + function ChartJsFactory (ChartJs, $timeout) { + return function chart (type) { + return { + restrict: 'CA', + scope: { + data: '=?', + labels: '=?', + options: '=?', + series: '=?', + colours: '=?', + getColour: '=?', + chartType: '=', + legend: '@', + click: '=?', + hover: '=?', + + chartData: '=?', + chartLabels: '=?', + chartOptions: '=?', + chartSeries: '=?', + chartColours: '=?', + chartLegend: '@', + chartClick: '=?', + chartHover: '=?' + }, + link: function (scope, elem/*, attrs */) { + var chart, container = document.createElement('div'); + container.className = 'chart-container'; + elem.replaceWith(container); + container.appendChild(elem[0]); + + if (usingExcanvas) window.G_vmlCanvasManager.initElement(elem[0]); + + ['data', 'labels', 'options', 'series', 'colours', 'legend', 'click', 'hover'].forEach(deprecated); + function aliasVar (fromName, toName) { + scope.$watch(fromName, function (newVal) { + if (typeof newVal === 'undefined') return; + scope[toName] = newVal; + }); + } + /* provide backward compatibility to "old" directive names, by + * having an alias point from the new names to the old names. */ + aliasVar('chartData', 'data'); + aliasVar('chartLabels', 'labels'); + aliasVar('chartOptions', 'options'); + aliasVar('chartSeries', 'series'); + aliasVar('chartColours', 'colours'); + aliasVar('chartLegend', 'legend'); + aliasVar('chartClick', 'click'); + aliasVar('chartHover', 'hover'); + + // Order of setting "watch" matter + + scope.$watch('data', function (newVal, oldVal) { + if (! newVal || ! newVal.length || (Array.isArray(newVal[0]) && ! newVal[0].length)) return; + var chartType = type || scope.chartType; + if (! chartType) return; + + if (chart) { + if (canUpdateChart(newVal, oldVal)) return updateChart(chart, newVal, scope, elem); + chart.destroy(); + } + + createChart(chartType); + }, true); + + scope.$watch('series', resetChart, true); + scope.$watch('labels', resetChart, true); + scope.$watch('options', resetChart, true); + scope.$watch('colours', resetChart, true); + + scope.$watch('chartType', function (newVal, oldVal) { + if (isEmpty(newVal)) return; + if (angular.equals(newVal, oldVal)) return; + if (chart) chart.destroy(); + createChart(newVal); + }); + + scope.$on('$destroy', function () { + if (chart) chart.destroy(); + }); + + function resetChart (newVal, oldVal) { + if (isEmpty(newVal)) return; + if (angular.equals(newVal, oldVal)) return; + var chartType = type || scope.chartType; + if (! chartType) return; + + // chart.update() doesn't work for series and labels + // so we have to re-create the chart entirely + if (chart) chart.destroy(); + + createChart(chartType); + } + + function createChart (type) { + if (isResponsive(type, scope) && elem[0].clientHeight === 0 && container.clientHeight === 0) { + return $timeout(function () { + createChart(type); + }, 50, false); + } + if (! scope.data || ! scope.data.length) return; + scope.getColour = typeof scope.getColour === 'function' ? scope.getColour : getRandomColour; + scope.colours = getColours(type, scope); + var cvs = elem[0], ctx = cvs.getContext('2d'); + var data = Array.isArray(scope.data[0]) ? 
+ getDataSets(scope.labels, scope.data, scope.series || [], scope.colours) : + getData(scope.labels, scope.data, scope.colours); + var options = angular.extend({}, ChartJs.getOptions(type), scope.options); + chart = new ChartJs.Chart(ctx)[type](data, options); + scope.$emit('create', chart); + + // Bind events + cvs.onclick = scope.click ? getEventHandler(scope, chart, 'click', false) : angular.noop; + cvs.onmousemove = scope.hover ? getEventHandler(scope, chart, 'hover', true) : angular.noop; + + if (scope.legend && scope.legend !== 'false') setLegend(elem, chart); + } + + function deprecated (attr) { + if (typeof console !== 'undefined' && ChartJs.getOptions().env !== 'test') { + var warn = typeof console.warn === 'function' ? console.warn : console.log; + if (!! scope[attr]) { + warn.call(console, '"%s" is deprecated and will be removed in a future version. ' + + 'Please use "chart-%s" instead.', attr, attr); + } + } + } + } + }; + }; + + function canUpdateChart (newVal, oldVal) { + if (newVal && oldVal && newVal.length && oldVal.length) { + return Array.isArray(newVal[0]) ? + newVal.length === oldVal.length && newVal.every(function (element, index) { + return element.length === oldVal[index].length; }) : + oldVal.reduce(sum, 0) > 0 ? newVal.length === oldVal.length : false; + } + return false; + } + + function sum (carry, val) { + return carry + val; + } + + function getEventHandler (scope, chart, action, triggerOnlyOnChange) { + var lastState = null; + return function (evt) { + var atEvent = chart.getPointsAtEvent || chart.getBarsAtEvent || chart.getSegmentsAtEvent; + if (atEvent) { + var activePoints = atEvent.call(chart, evt); + if (triggerOnlyOnChange === false || angular.equals(lastState, activePoints) === false) { + lastState = activePoints; + scope[action](activePoints, evt, chart); + scope.$apply(); + } + } + }; + } + + function getColours (type, scope) { + var colours = angular.copy(scope.colours || + ChartJs.getOptions(type).colours || + Chart.defaults.global.colours + ); + while (colours.length < scope.data.length) { + colours.push(scope.getColour()); + } + return colours.map(convertColour); + } + + function convertColour (colour) { + if (typeof colour === 'object' && colour !== null) return colour; + if (typeof colour === 'string' && colour[0] === '#') return getColour(hexToRgb(colour.substr(1))); + return getRandomColour(); + } + + function getRandomColour () { + var colour = [getRandomInt(0, 255), getRandomInt(0, 255), getRandomInt(0, 255)]; + return getColour(colour); + } + + function getColour (colour) { + return { + fillColor: rgba(colour, 0.2), + strokeColor: rgba(colour, 1), + pointColor: rgba(colour, 1), + pointStrokeColor: '#fff', + pointHighlightFill: '#fff', + pointHighlightStroke: rgba(colour, 0.8) + }; + } + + function getRandomInt (min, max) { + return Math.floor(Math.random() * (max - min + 1)) + min; + } + + function rgba (colour, alpha) { + if (usingExcanvas) { + // rgba not supported by IE8 + return 'rgb(' + colour.join(',') + ')'; + } else { + return 'rgba(' + colour.concat(alpha).join(',') + ')'; + } + } + + // Credit: http://stackoverflow.com/a/11508164/1190235 + function hexToRgb (hex) { + var bigint = parseInt(hex, 16), + r = (bigint >> 16) & 255, + g = (bigint >> 8) & 255, + b = bigint & 255; + + return [r, g, b]; + } + + function getDataSets (labels, data, series, colours) { + return { + labels: labels, + datasets: data.map(function (item, i) { + return angular.extend({}, colours[i], { + label: series[i], + data: item + }); + }) + }; + } + + 
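getDataSets above and getData below build the two payload shapes Chart.js 1.x consumes. Purely as an illustrative sketch (the labels, series names, and colour values here are invented, and only one of the merged colour properties is shown):

```js
// Illustrative only -- not part of angular-chart.js.
// Nested input [[...], [...]] goes through getDataSets() and yields the
// dataset form used by Line/Bar/Radar charts (colour props merged in):
var datasetForm = {
  labels: ['105', '106'],        // e.g. build numbers
  datasets: [
    {label: 'Perc99', data: [120, 130], strokeColor: 'rgba(151,187,205,1)'}
  ]
};

// Flat input [...] goes through getData() and yields the segment form
// used by Pie/Doughnut/PolarArea charts (one entry per slice):
var segmentForm = [
  {label: '105', value: 120, color: 'rgba(151,187,205,1)', highlight: 'rgba(151,187,205,0.8)'}
];

console.log(datasetForm, segmentForm);
```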
function getData (labels, data, colours) { + return labels.map(function (label, i) { + return angular.extend({}, colours[i], { + label: label, + value: data[i], + color: colours[i].strokeColor, + highlight: colours[i].pointHighlightStroke + }); + }); + } + + function setLegend (elem, chart) { + var $parent = elem.parent(), + $oldLegend = $parent.find('chart-legend'), + legend = '<chart-legend>' + chart.generateLegend() + '</chart-legend>'; + if ($oldLegend.length) $oldLegend.replaceWith(legend); + else $parent.append(legend); + } + + function updateChart (chart, values, scope, elem) { + if (Array.isArray(scope.data[0])) { + chart.datasets.forEach(function (dataset, i) { + (dataset.points || dataset.bars).forEach(function (dataItem, j) { + dataItem.value = values[i][j]; + }); + }); + } else { + chart.segments.forEach(function (segment, i) { + segment.value = values[i]; + }); + } + chart.update(); + scope.$emit('update', chart); + if (scope.legend && scope.legend !== 'false') setLegend(elem, chart); + } + + function isEmpty (value) { + return ! value || + (Array.isArray(value) && ! value.length) || + (typeof value === 'object' && ! Object.keys(value).length); + } + + function isResponsive (type, scope) { + var options = angular.extend({}, Chart.defaults.global, ChartJs.getOptions(type), scope.options); + return options.responsive; + } + } +})); diff --git a/perfdash/www/index.html b/perfdash/www/index.html new file mode 100644 index 000000000000..037930bdec93 --- /dev/null +++ b/perfdash/www/index.html @@ -0,0 +1,52 @@
+ <!-- The 52 lines of markup were lost in text extraction (all tags, including the head's script and stylesheet includes, were stripped); only the visible strings survive: a "Performance Dashboard" heading, a test-name selector rendering {{testName}}, one selector per label rendering {{item}}, the chart canvas, and the hint "Click a data point to see build logs. CTRL+click anywhere to trim outliers." -->
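The original markup cannot be reconstructed from this extraction. Purely as a hypothetical sketch of how such a page would wire up the directives defined in angular-chart.js above: the `PerfDashApp`/`AppCtrl` names and the `controller.*` fields come from script.js below, while the element structure and everything else is assumed.

```html
<!-- Hypothetical sketch, not the original index.html. -->
<body ng-app="PerfDashApp" ng-controller="AppCtrl">
  <h2>Performance Dashboard</h2>
  <!-- test-name and per-label selectors would sit here, bound to
       controller.testName / controller.selectedLabels -->
  <canvas class="chart chart-line"
          chart-data="controller.seriesData"
          chart-labels="controller.builds"
          chart-series="controller.series"
          chart-options="controller.options"
          chart-click="controller.onClick"
          chart-legend="true"></canvas>
  Click a data point to see build logs. CTRL+click anywhere to trim outliers.
</body>
```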
diff --git a/perfdash/www/script.js b/perfdash/www/script.js new file mode 100644 index 000000000000..f6caed2e30ce --- /dev/null +++ b/perfdash/www/script.js @@ -0,0 +1,162 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +var app = angular.module('PerfDashApp', ['ngMaterial', 'chart.js']); + +var PerfDashApp = function(http, scope) { + this.http = http; + this.scope = scope; + this.testNames = []; + this.onClick = this.onClickInternal_.bind(this); + this.cap = 0; +}; + + +PerfDashApp.prototype.onClickInternal_ = function(data, evt, chart) { + console.log(this, data, evt, chart); + if (evt.ctrlKey) { + this.cap = (chart.scale.min + chart.scale.max) / 2; + this.labelChanged(); + return; + } + + // Get location + // TODO(random-liu): Make the URL configurable if we want to support more + // buckets in the future. + window.open("https://k8s-gubernator.appspot.com/build/kubernetes-jenkins/logs/" + this.job + "/" + data[0].label + "/", "_blank"); +}; + +// Fetch data from the server and update the data to display +PerfDashApp.prototype.refresh = function() { + this.http.get("api") + .success(function(data) { + this.testNames = Object.keys(data); + this.testName = this.testNames[0]; + this.allData = data; + this.testNameChanged(); + }.bind(this)) + .error(function(data) { + console.log("error fetching result"); + console.log(data); + }); +}; + +// Update the data to graph, using selected labels +PerfDashApp.prototype.labelChanged = function() { + this.seriesData = []; + this.series = []; + var result = this.getData(this.selectedLabels); + if (result.length <= 0) { + return; + } + // All the units should be the same + this.options = {scaleLabel: "<%=value%> "+result[0].unit}; + // Start with higher percentiles, since their values are usually strictly higher + // than lower percentiles, which avoids obscuring graph data. It also orders data + // in the onHover labels more naturally. + var seriesLabels = Object.keys(result[0].data); + seriesLabels.sort(); + seriesLabels.reverse(); + angular.forEach(seriesLabels, function(name) { + this.seriesData.push(this.getStream(result, name)); + this.series.push(name); + }, this); + this.cap = 0; +}; + +// Update the data to graph, using the selected testName +PerfDashApp.prototype.testNameChanged = function() { + this.data = this.allData[this.testName].builds; + this.job = this.allData[this.testName].job; + this.builds = this.getBuilds(); + this.labels = this.getLabels(); + this.labelChanged(); +}; + +// Get all of the builds for the data set (e.g. build numbers) +PerfDashApp.prototype.getBuilds = function() { + return Object.keys(this.data); +}; + +// Get the set of all labels (e.g.
'resources', 'verbs') in the data set +PerfDashApp.prototype.getLabels = function() { + var set = {}; + angular.forEach(this.data, function(items, build) { + angular.forEach(items, function(item) { + angular.forEach(item.labels, function(label, name) { + if (set[name] == undefined) { + set[name] = {} + } + set[name][label] = true + }); + }); + }); + + this.selectedLabels = {} + var labels = {}; + angular.forEach(set, function(items, name) { + labels[name] = []; + angular.forEach(items, function(ignore, item) { + if (this.selectedLabels[name] == undefined) { + this.selectedLabels[name] = item; + } + labels[name].push(item) + }, this); + }, this); + return labels; +}; + +// Extract a time series of data for specific labels +PerfDashApp.prototype.getData = function(labels) { + var result = []; + angular.forEach(this.data, function(items, build) { + angular.forEach(items, function(item) { + var match = true; + angular.forEach(labels, function(label, name) { + if (item.labels[name] != label) { + match = false; + } + }); + if (match) { + result.push(item); + } + }); + }); + return result; +}; + +// Given a slice of data, turn it into a time series of numbers +// 'data' is an array of APICallLatency objects +// 'stream' is a selector for latency data, (e.g. 'Perc50') +PerfDashApp.prototype.getStream = function(data, stream) { + var result = []; + angular.forEach(data, function(value) { + var x = value.data[stream]; + if (this.cap != 0 && x > this.cap) { + x = this.cap; + } + result.push(x); + }, this); + return result; +}; + +app.controller('AppCtrl', ['$scope', '$http', '$interval', function($scope, $http, $interval) { + $scope.controller = new PerfDashApp($http, $scope); + $scope.controller.refresh(); + + // Refresh every 5 min. The data only refreshes every 10 minutes on the server + $interval($scope.controller.refresh.bind($scope.controller), 300000) +}]); diff --git a/vendor/BUILD b/vendor/BUILD index 316a7ea37b38..e0d1772d0b5d 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -9,6 +9,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//vendor/bitbucket.org/bertimus9/systemstat:all-srcs", "//vendor/bitbucket.org/ww/goautoneg:all-srcs", "//vendor/cloud.google.com/go/compute/metadata:all-srcs", "//vendor/cloud.google.com/go/iam:all-srcs", @@ -32,6 +33,49 @@ filegroup( "//vendor/github.com/beorn7/perks/quantile:all-srcs", "//vendor/github.com/boltdb/bolt:all-srcs", "//vendor/github.com/bwmarrin/snowflake:all-srcs", + "//vendor/github.com/codegangsta/negroni:all-srcs", + "//vendor/github.com/coreos/bbolt:all-srcs", + "//vendor/github.com/coreos/etcd/alarm:all-srcs", + "//vendor/github.com/coreos/etcd/auth:all-srcs", + "//vendor/github.com/coreos/etcd/client:all-srcs", + "//vendor/github.com/coreos/etcd/clientv3:all-srcs", + "//vendor/github.com/coreos/etcd/compactor:all-srcs", + "//vendor/github.com/coreos/etcd/discovery:all-srcs", + "//vendor/github.com/coreos/etcd/error:all-srcs", + "//vendor/github.com/coreos/etcd/etcdserver:all-srcs", + "//vendor/github.com/coreos/etcd/lease:all-srcs", + "//vendor/github.com/coreos/etcd/mvcc:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/adt:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/contention:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/cpuutil:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/crc:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/fileutil:all-srcs", + 
"//vendor/github.com/coreos/etcd/pkg/httputil:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/idutil:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/ioutil:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/logutil:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/monotime:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/netutil:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/pathutil:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/pbutil:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/runtime:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/schedule:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/srv:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/tlsutil:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/transport:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/types:all-srcs", + "//vendor/github.com/coreos/etcd/pkg/wait:all-srcs", + "//vendor/github.com/coreos/etcd/raft:all-srcs", + "//vendor/github.com/coreos/etcd/rafthttp:all-srcs", + "//vendor/github.com/coreos/etcd/snap:all-srcs", + "//vendor/github.com/coreos/etcd/store:all-srcs", + "//vendor/github.com/coreos/etcd/version:all-srcs", + "//vendor/github.com/coreos/etcd/wal:all-srcs", + "//vendor/github.com/coreos/go-semver/semver:all-srcs", + "//vendor/github.com/coreos/go-systemd/journal:all-srcs", + "//vendor/github.com/coreos/pkg/capnslog:all-srcs", + "//vendor/github.com/davecgh/go-spew/spew:all-srcs", + "//vendor/github.com/dgrijalva/jwt-go:all-srcs", "//vendor/github.com/docker/distribution/digestset:all-srcs", "//vendor/github.com/docker/distribution/reference:all-srcs", "//vendor/github.com/docker/docker/api:all-srcs", @@ -40,7 +84,11 @@ filegroup( "//vendor/github.com/docker/go-connections/sockets:all-srcs", "//vendor/github.com/docker/go-connections/tlsconfig:all-srcs", "//vendor/github.com/docker/go-units:all-srcs", + "//vendor/github.com/elazarl/goproxy:all-srcs", "//vendor/github.com/emicklei/go-restful:all-srcs", + "//vendor/github.com/evanphx/json-patch:all-srcs", + "//vendor/github.com/garyburd/redigo/internal:all-srcs", + "//vendor/github.com/garyburd/redigo/redis:all-srcs", "//vendor/github.com/ghodss/yaml:all-srcs", "//vendor/github.com/go-ini/ini:all-srcs", "//vendor/github.com/go-openapi/jsonpointer:all-srcs", @@ -63,7 +111,14 @@ filegroup( "//vendor/github.com/google/go-querystring/query:all-srcs", "//vendor/github.com/google/gofuzz:all-srcs", "//vendor/github.com/googleapis/gax-go:all-srcs", + "//vendor/github.com/googleapis/gnostic/OpenAPIv2:all-srcs", + "//vendor/github.com/googleapis/gnostic/compiler:all-srcs", + "//vendor/github.com/googleapis/gnostic/extensions:all-srcs", + "//vendor/github.com/gorilla/context:all-srcs", + "//vendor/github.com/gorilla/mux:all-srcs", "//vendor/github.com/gregjones/httpcache:all-srcs", + "//vendor/github.com/howeyc/gopass:all-srcs", + "//vendor/github.com/imdario/mergo:all-srcs", "//vendor/github.com/inconshreveable/mousetrap:all-srcs", "//vendor/github.com/influxdata/influxdb/client/v2:all-srcs", "//vendor/github.com/influxdata/influxdb/models:all-srcs", @@ -72,6 +127,9 @@ filegroup( 
"//vendor/github.com/jinzhu/inflection:all-srcs", "//vendor/github.com/jmank88/nuts:all-srcs", "//vendor/github.com/jmespath/go-jmespath:all-srcs", + "//vendor/github.com/jonboulle/clockwork:all-srcs", + "//vendor/github.com/json-iterator/go:all-srcs", + "//vendor/github.com/juju/ratelimit:all-srcs", "//vendor/github.com/mailru/easyjson/buffer:all-srcs", "//vendor/github.com/mailru/easyjson/jlexer:all-srcs", "//vendor/github.com/mailru/easyjson/jwriter:all-srcs", @@ -81,6 +139,7 @@ filegroup( "//vendor/github.com/opencontainers/go-digest:all-srcs", "//vendor/github.com/opencontainers/image-spec/specs-go:all-srcs", "//vendor/github.com/pelletier/go-toml:all-srcs", + "//vendor/github.com/petar/GoLLRB/llrb:all-srcs", "//vendor/github.com/peterbourgon/diskv:all-srcs", "//vendor/github.com/pkg/errors:all-srcs", "//vendor/github.com/prometheus/client_golang/prometheus:all-srcs", @@ -96,6 +155,12 @@ filegroup( "//vendor/github.com/sirupsen/logrus:all-srcs", "//vendor/github.com/spf13/cobra:all-srcs", "//vendor/github.com/spf13/pflag:all-srcs", + "//vendor/github.com/ugorji/go/codec:all-srcs", + "//vendor/github.com/xiang90/probing:all-srcs", + "//vendor/github.com/xyproto/pinterface:all-srcs", + "//vendor/github.com/xyproto/simpleredis:all-srcs", + "//vendor/golang.org/x/crypto/bcrypt:all-srcs", + "//vendor/golang.org/x/crypto/blowfish:all-srcs", "//vendor/golang.org/x/crypto/curve25519:all-srcs", "//vendor/golang.org/x/crypto/ed25519:all-srcs", "//vendor/golang.org/x/crypto/ssh:all-srcs", @@ -110,10 +175,19 @@ filegroup( "//vendor/golang.org/x/sync/errgroup:all-srcs", "//vendor/golang.org/x/sys/unix:all-srcs", "//vendor/golang.org/x/sys/windows:all-srcs", + "//vendor/golang.org/x/text/collate:all-srcs", + "//vendor/golang.org/x/text/internal/colltab:all-srcs", + "//vendor/golang.org/x/text/internal/gen:all-srcs", + "//vendor/golang.org/x/text/internal/tag:all-srcs", + "//vendor/golang.org/x/text/internal/triegen:all-srcs", + "//vendor/golang.org/x/text/internal/ucd:all-srcs", + "//vendor/golang.org/x/text/language:all-srcs", "//vendor/golang.org/x/text/secure/bidirule:all-srcs", "//vendor/golang.org/x/text/transform:all-srcs", "//vendor/golang.org/x/text/unicode/bidi:all-srcs", + "//vendor/golang.org/x/text/unicode/cldr:all-srcs", "//vendor/golang.org/x/text/unicode/norm:all-srcs", + "//vendor/golang.org/x/text/unicode/rangetable:all-srcs", "//vendor/golang.org/x/text/width:all-srcs", "//vendor/golang.org/x/tools/go/gcexportdata:all-srcs", "//vendor/golang.org/x/tools/go/gcimporter15:all-srcs", @@ -124,6 +198,7 @@ filegroup( "//vendor/google.golang.org/api/option:all-srcs", "//vendor/google.golang.org/api/storage/v1:all-srcs", "//vendor/google.golang.org/api/transport/http:all-srcs", + "//vendor/google.golang.org/appengine:all-srcs", "//vendor/google.golang.org/genproto/googleapis/api/annotations:all-srcs", "//vendor/google.golang.org/genproto/googleapis/iam/v1:all-srcs", "//vendor/google.golang.org/genproto/googleapis/rpc/status:all-srcs", @@ -131,9 +206,43 @@ filegroup( "//vendor/gopkg.in/inf.v0:all-srcs", "//vendor/gopkg.in/robfig/cron.v2:all-srcs", "//vendor/gopkg.in/yaml.v2:all-srcs", + "//vendor/k8s.io/api/admission/v1beta1:all-srcs", + "//vendor/k8s.io/api/admissionregistration/v1alpha1:all-srcs", + "//vendor/k8s.io/api/admissionregistration/v1beta1:all-srcs", + "//vendor/k8s.io/api/apps/v1:all-srcs", + 
"//vendor/k8s.io/api/apps/v1beta1:all-srcs", + "//vendor/k8s.io/api/apps/v1beta2:all-srcs", + "//vendor/k8s.io/api/authentication/v1:all-srcs", + "//vendor/k8s.io/api/authentication/v1beta1:all-srcs", + "//vendor/k8s.io/api/authorization/v1:all-srcs", + "//vendor/k8s.io/api/authorization/v1beta1:all-srcs", + "//vendor/k8s.io/api/autoscaling/v1:all-srcs", + "//vendor/k8s.io/api/autoscaling/v2beta1:all-srcs", + "//vendor/k8s.io/api/batch/v1:all-srcs", + "//vendor/k8s.io/api/batch/v1beta1:all-srcs", + "//vendor/k8s.io/api/batch/v2alpha1:all-srcs", + "//vendor/k8s.io/api/certificates/v1beta1:all-srcs", "//vendor/k8s.io/api/core/v1:all-srcs", + "//vendor/k8s.io/api/events/v1beta1:all-srcs", + "//vendor/k8s.io/api/extensions/v1beta1:all-srcs", + "//vendor/k8s.io/api/networking/v1:all-srcs", + "//vendor/k8s.io/api/policy/v1beta1:all-srcs", + "//vendor/k8s.io/api/rbac/v1:all-srcs", + "//vendor/k8s.io/api/rbac/v1alpha1:all-srcs", + "//vendor/k8s.io/api/rbac/v1beta1:all-srcs", + "//vendor/k8s.io/api/scheduling/v1alpha1:all-srcs", + "//vendor/k8s.io/api/settings/v1alpha1:all-srcs", + "//vendor/k8s.io/api/storage/v1:all-srcs", + "//vendor/k8s.io/api/storage/v1alpha1:all-srcs", + "//vendor/k8s.io/api/storage/v1beta1:all-srcs", + "//vendor/k8s.io/apiextensions-apiserver/pkg/features:all-srcs", + "//vendor/k8s.io/apimachinery/pkg/api/errors:all-srcs", + "//vendor/k8s.io/apimachinery/pkg/api/meta:all-srcs", "//vendor/k8s.io/apimachinery/pkg/api/resource:all-srcs", + "//vendor/k8s.io/apimachinery/pkg/apimachinery:all-srcs", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:all-srcs", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:all-srcs", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:all-srcs", "//vendor/k8s.io/apimachinery/pkg/conversion:all-srcs", "//vendor/k8s.io/apimachinery/pkg/fields:all-srcs", "//vendor/k8s.io/apimachinery/pkg/labels:all-srcs", @@ -142,19 +251,69 @@ filegroup( "//vendor/k8s.io/apimachinery/pkg/types:all-srcs", "//vendor/k8s.io/apimachinery/pkg/util/clock:all-srcs", "//vendor/k8s.io/apimachinery/pkg/util/errors:all-srcs", + "//vendor/k8s.io/apimachinery/pkg/util/framer:all-srcs", "//vendor/k8s.io/apimachinery/pkg/util/intstr:all-srcs", "//vendor/k8s.io/apimachinery/pkg/util/json:all-srcs", "//vendor/k8s.io/apimachinery/pkg/util/net:all-srcs", + "//vendor/k8s.io/apimachinery/pkg/util/rand:all-srcs", "//vendor/k8s.io/apimachinery/pkg/util/runtime:all-srcs", "//vendor/k8s.io/apimachinery/pkg/util/sets:all-srcs", "//vendor/k8s.io/apimachinery/pkg/util/validation:all-srcs", "//vendor/k8s.io/apimachinery/pkg/util/wait:all-srcs", "//vendor/k8s.io/apimachinery/pkg/util/yaml:all-srcs", + "//vendor/k8s.io/apimachinery/pkg/version:all-srcs", "//vendor/k8s.io/apimachinery/pkg/watch:all-srcs", "//vendor/k8s.io/apimachinery/third_party/forked/golang/reflect:all-srcs", + "//vendor/k8s.io/apiserver/pkg/features:all-srcs", + "//vendor/k8s.io/apiserver/pkg/util/feature:all-srcs", "//vendor/k8s.io/apiserver/pkg/util/flag:all-srcs", + "//vendor/k8s.io/apiserver/pkg/util/logs:all-srcs", + "//vendor/k8s.io/client-go/discovery:all-srcs", + "//vendor/k8s.io/client-go/kubernetes:all-srcs", + "//vendor/k8s.io/client-go/pkg/version:all-srcs", + "//vendor/k8s.io/client-go/rest:all-srcs", + "//vendor/k8s.io/client-go/tools/auth:all-srcs", + "//vendor/k8s.io/client-go/tools/clientcmd:all-srcs", + "//vendor/k8s.io/client-go/tools/metrics:all-srcs", + "//vendor/k8s.io/client-go/tools/reference:all-srcs", + "//vendor/k8s.io/client-go/transport:all-srcs", + 
"//vendor/k8s.io/client-go/util/cert:all-srcs", + "//vendor/k8s.io/client-go/util/flowcontrol:all-srcs", + "//vendor/k8s.io/client-go/util/homedir:all-srcs", + "//vendor/k8s.io/client-go/util/integer:all-srcs", "//vendor/k8s.io/contrib/test-utils/utils:all-srcs", "//vendor/k8s.io/kube-openapi/pkg/common:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/api/legacyscheme:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/api/ref:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/admissionregistration:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/apps:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/authentication:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/authorization:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/autoscaling:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/batch:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/certificates:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/componentconfig:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/core:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/events:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/extensions:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/networking:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/policy:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/rbac:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/scheduling:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/settings:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/apis/storage:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/features:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/kubelet/apis:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/master/ports:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/util/parsers:all-srcs", + "//vendor/k8s.io/kubernetes/pkg/util/pointer:all-srcs", + "//vendor/k8s.io/kubernetes/test/e2e/perftype:all-srcs", + "//vendor/k8s.io/kubernetes/test/images/net/common:all-srcs", + "//vendor/k8s.io/kubernetes/test/images/net/nat:all-srcs", + "//vendor/k8s.io/kubernetes/test/images/resource-consumer/common:all-srcs", + "//vendor/k8s.io/kubernetes/third_party/forked/etcd221/pkg/fileutil:all-srcs", + "//vendor/k8s.io/kubernetes/third_party/forked/etcd221/wal:all-srcs", ], tags = ["automanaged"], visibility = ["//visibility:public"], diff --git a/vendor/bitbucket.org/bertimus9/systemstat/.gitignore b/vendor/bitbucket.org/bertimus9/systemstat/.gitignore new file mode 100644 index 000000000000..fa49007ba62f --- /dev/null +++ b/vendor/bitbucket.org/bertimus9/systemstat/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +*.swp +*.swo diff --git a/vendor/bitbucket.org/bertimus9/systemstat/AUTHORS b/vendor/bitbucket.org/bertimus9/systemstat/AUTHORS new file mode 100644 index 000000000000..50699b8a56c4 --- /dev/null +++ b/vendor/bitbucket.org/bertimus9/systemstat/AUTHORS @@ -0,0 +1 @@ +[Phillip](http://bitbucket.org/bertimus9) \ No newline at end of file diff --git a/vendor/bitbucket.org/bertimus9/systemstat/BUILD b/vendor/bitbucket.org/bertimus9/systemstat/BUILD new file mode 100644 index 000000000000..5bec8520e75a --- /dev/null +++ b/vendor/bitbucket.org/bertimus9/systemstat/BUILD @@ -0,0 +1,60 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ 
+ "systemstat.go", + "utils.go", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "systemstat_linux.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "systemstat_ex.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "systemstat_ex.go", + ], + "//conditions:default": [], + }), + importpath = "bitbucket.org/bertimus9/systemstat", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/bitbucket.org/bertimus9/systemstat/LICENSE b/vendor/bitbucket.org/bertimus9/systemstat/LICENSE new file mode 100644 index 000000000000..8bff97186441 --- /dev/null +++ b/vendor/bitbucket.org/bertimus9/systemstat/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Phillip Bond + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/bitbucket.org/bertimus9/systemstat/Makefile b/vendor/bitbucket.org/bertimus9/systemstat/Makefile new file mode 100644 index 000000000000..5a4126d90454 --- /dev/null +++ b/vendor/bitbucket.org/bertimus9/systemstat/Makefile @@ -0,0 +1,8 @@ +test: + gofmt -s -w . + go test ./... + go get bitbucket.org/bertimus9/systemstat + +coverage: + go get github.com/axw/gocov/gocov + gocov test . | gocov report diff --git a/vendor/bitbucket.org/bertimus9/systemstat/README.md b/vendor/bitbucket.org/bertimus9/systemstat/README.md new file mode 100644 index 000000000000..196982cb35e3 --- /dev/null +++ b/vendor/bitbucket.org/bertimus9/systemstat/README.md @@ -0,0 +1,69 @@ +# systemstat + +[Documentation online](http://godoc.org/bitbucket.org/bertimus9/systemstat) + +**systemstat** is a package written in Go generated automatically by `gobi`. 
+
+**systemstat** allows you to add system statistics to your Go program; it
+currently polls the Linux kernel for CPU usage, free/used memory and swap
+sizes, and uptime for your Go process, as well as the system you're running it
+on, and the system load. It can be used to make a stripped-down version of top
+that monitors the current Go process and ignores other processes and the
+number of users with ttys. See the examples directory for go-top.go, which is
+my attempt at a top clone. Bear in mind that the intention of **systemstat**
+is to allow your process to monitor itself and its environment, not to
+replace top.
+
+## Install (with GOPATH set on your machine)
+----------
+
+* Step 1: Get the `systemstat` package
+
+```
+go get bitbucket.org/bertimus9/systemstat
+```
+
+* Step 2 (Optional): Run tests
+
+```
+$ go test -v bitbucket.org/bertimus9/systemstat
+```
+
+* Step 3 (Optional): Run example
+
+```bash
+$ cd to the first directory in your $GOPATH
+$ cd src/bitbucket.org/bertimus9/systemstat
+$ go run examples/go-top.go
+```
+
+## Usage
+----------
+```
+package main
+
+import (
+	"bitbucket.org/bertimus9/systemstat"
+	"fmt"
+)
+
+var sample systemstat.MemSample
+
+// This example shows how easy it is to get memory information
+func main() {
+	sample = systemstat.GetMemSample()
+	fmt.Println("Total available RAM in kb:", sample.MemTotal, "k total")
+	fmt.Println("Used RAM in kb:", sample.MemUsed, "k used")
+	fmt.Println("Free RAM in kb:", sample.MemFree, "k free")
+	fmt.Printf("The output is similar to, but somewhat different than:\n\ttop -n1 | grep Mem:\n")
+}
+```
+
+## License
+----------
+
+Copyright (c) 2013 Phillip Bond
+
+Licensed under the MIT License
+
+see file LICENSE
+
diff --git a/vendor/bitbucket.org/bertimus9/systemstat/VERSION b/vendor/bitbucket.org/bertimus9/systemstat/VERSION
new file mode 100644
index 000000000000..8a9ecc2ea99d
--- /dev/null
+++ b/vendor/bitbucket.org/bertimus9/systemstat/VERSION
@@ -0,0 +1 @@
+0.0.1
\ No newline at end of file
diff --git a/vendor/bitbucket.org/bertimus9/systemstat/systemstat.go b/vendor/bitbucket.org/bertimus9/systemstat/systemstat.go
new file mode 100644
index 000000000000..7ac1f6168358
--- /dev/null
+++ b/vendor/bitbucket.org/bertimus9/systemstat/systemstat.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2013 Phillip Bond
+// Licensed under the MIT License
+// see file LICENSE
+
+package systemstat
+
+import (
+	"time"
+)
+
+// CPUSample is an object that represents the breakdown of time spent by the
+// CPU in various types of tasks. Two CPUSamples are required to find the
+// average usage over time, represented by the CPUAverage object. The CPUSample
+// is taken from the line "cpu" from /proc/stat in the Linux kernel.
+//
+// Summarized from the proc(5) man page:
+// /proc/stat :
+//    kernel/system statistics. Varies with architecture.
+type CPUSample struct {
+	User    uint64 // time spent in user mode
+	Nice    uint64 // time spent in user mode with low priority (nice)
+	System  uint64 // time spent in system mode
+	Idle    uint64 // time spent in the idle task
+	Iowait  uint64 // time spent waiting for I/O to complete (since Linux 2.5.41)
+	Irq     uint64 // time spent servicing interrupts (since 2.6.0-test4)
+	SoftIrq uint64 // time spent servicing softirqs (since 2.6.0-test4)
+	Steal   uint64 // time spent in other OSes when running in a virtualized environment
+	Guest   uint64 // time spent running a virtual CPU for guest operating systems under the control of the Linux kernel.
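+	// NOTE: per proc(5), guest time is also accounted in the User field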
+	Name    string    // name of the line in /proc/stat; cpu, cpu1, etc
+	Time    time.Time // when the sample was taken
+	Total   uint64    // total of all time fields
+}
+
+type ProcCPUSample struct {
+	User         float64   // time spent in user mode
+	System       float64   // time spent in system mode
+	Time         time.Time // when the sample was taken
+	Total        float64   // total of all time fields
+	ProcMemUsedK int64
+}
+
+type ProcCPUAverage struct {
+	UserPct            float64   // time spent in user mode
+	SystemPct          float64   // time spent in system mode
+	TotalPct           float64   // total of all time fields
+	PossiblePct        float64   // maximum possible percentage: 100 * the number of CPUs
+	CumulativeTotalPct float64   // total of all time throughout process life
+	Time               time.Time // when the sample was taken
+	Seconds            float64   // how many seconds between the two samples
+}
+
+// SimpleCPUAverage is an object that represents the average cpu usage over a
+// time period. It is calculated by taking the difference between two
+// CPUSamples (whose units are clock ticks), dividing by the number of elapsed
+// ticks between the samples, and converting to a percent. It is a simplified
+// version of the CPUAverage in that it only accounts for time in the Idle
+// task and all other time (Busy).
+type SimpleCPUAverage struct {
+	BusyPct float64 // percent of time spent by CPU performing all non-idle tasks
+	IdlePct float64 // percent of time spent by CPU in the idle task
+}
+
+// CPUAverage is an object that represents the average cpu usage over a
+// time period. It is calculated by taking the difference between two
+// CPUSamples (whose units are clock ticks), dividing by the number of elapsed
+// ticks between the samples, and converting to a percent.
+type CPUAverage struct {
+	UserPct    float64
+	NicePct    float64
+	SystemPct  float64
+	IdlePct    float64
+	IowaitPct  float64
+	IrqPct     float64
+	SoftIrqPct float64
+	StealPct   float64
+	GuestPct   float64
+	Time       time.Time
+	Seconds    float64 // how many seconds between the two samples
+}
+
+type MemSample struct {
+	Buffers   uint64
+	Cached    uint64
+	MemTotal  uint64
+	MemUsed   uint64
+	MemFree   uint64
+	SwapTotal uint64
+	SwapUsed  uint64
+	SwapFree  uint64
+	Time      time.Time
+}
+
+type LoadAvgSample struct {
+	One     float64
+	Five    float64
+	Fifteen float64
+	Time    time.Time
+}
+
+type UptimeSample struct {
+	Uptime float64
+	Time   time.Time
+}
+
+// GetCPUAverage returns the average cpu usage between two CPUSamples.
+func GetCPUAverage(first CPUSample, second CPUSample) CPUAverage {
+	return getCPUAverage(first, second)
+}
+
+// GetSimpleCPUAverage returns an aggregated average cpu usage between two CPUSamples.
+func GetSimpleCPUAverage(first CPUSample, second CPUSample) SimpleCPUAverage {
+	return getSimpleCPUAverage(first, second)
+}
+
+// GetProcCPUAverage returns the average cpu usage of this running process
+func GetProcCPUAverage(first ProcCPUSample, second ProcCPUSample, procUptime float64) (avg ProcCPUAverage) {
+	return getProcCPUAverage(first, second, procUptime)
+}
+
+// GetCPUSample takes a snapshot of kernel statistics from the /proc/stat file.
+func GetCPUSample() (samp CPUSample) {
+	return getCPUSample("/proc/stat")
+}
+
+// GetProcCPUSample takes a snapshot of resource usage for the current process via getrusage(2).
+func GetProcCPUSample() (samp ProcCPUSample) {
+	return getProcCPUSample()
+}
+
+// GetUptime takes a snapshot of system uptime from the /proc/uptime file.
+func GetUptime() (samp UptimeSample) {
+	return getUptime("/proc/uptime")
+}
+
+// GetLoadAvgSample takes a snapshot of load info from the /proc/loadavg file.
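+// The One, Five and Fifteen fields of the returned sample hold the 1-, 5- and 15-minute load averages.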
+func GetLoadAvgSample() (samp LoadAvgSample) { + return getLoadAvgSample("/proc/loadavg") +} + +// GetMemSample takes a snapshot of memory info from the /proc/meminfo file. +func GetMemSample() (samp MemSample) { + return getMemSample("/proc/meminfo") +} diff --git a/vendor/bitbucket.org/bertimus9/systemstat/systemstat_ex.go b/vendor/bitbucket.org/bertimus9/systemstat/systemstat_ex.go new file mode 100644 index 000000000000..4b9f2fd758ca --- /dev/null +++ b/vendor/bitbucket.org/bertimus9/systemstat/systemstat_ex.go @@ -0,0 +1,49 @@ +// Copyright (c) 2013 Phillip Bond +// Licensed under the MIT License +// see file LICENSE + +// +build !linux + +package systemstat + +import ( + "syscall" + "time" +) + +func getUptime(procfile string) (uptime UptimeSample) { + notImplemented("getUptime") + uptime.Time = time.Now() + return +} + +func getLoadAvgSample(procfile string) (samp LoadAvgSample) { + notImplemented("getLoadAvgSample") + samp.Time = time.Now() + return +} + +func getMemSample(procfile string) (samp MemSample) { + notImplemented("getMemSample") + samp.Time = time.Now() + return +} + +func getProcCPUSample() (s ProcCPUSample) { + var processInfo syscall.Rusage + syscall.Getrusage(syscall.RUSAGE_SELF, &processInfo) + + s.Time = time.Now() + s.ProcMemUsedK = int64(processInfo.Maxrss) + s.User = float64(processInfo.Utime.Usec)/1000000 + float64(processInfo.Utime.Sec) + s.System = float64(processInfo.Stime.Usec)/1000000 + float64(processInfo.Stime.Sec) + s.Total = s.User + s.System + + return +} + +func getCPUSample(procfile string) (samp CPUSample) { + notImplemented("getCPUSample") + samp.Time = time.Now() + return +} diff --git a/vendor/bitbucket.org/bertimus9/systemstat/systemstat_linux.go b/vendor/bitbucket.org/bertimus9/systemstat/systemstat_linux.go new file mode 100644 index 000000000000..b4abdd5b4259 --- /dev/null +++ b/vendor/bitbucket.org/bertimus9/systemstat/systemstat_linux.go @@ -0,0 +1,163 @@ +// Copyright (c) 2013 Phillip Bond +// Licensed under the MIT License +// see file LICENSE + +// +build linux + +package systemstat + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" + "strconv" + "strings" + "syscall" + "time" +) + +func getUptime(procfile string) (uptime UptimeSample) { + // read in whole uptime file with cpu usage information ;"/proc/uptime" + contents, err := ioutil.ReadFile(procfile) + uptime.Time = time.Now() + if err != nil { + return + } + + reader := bufio.NewReader(bytes.NewBuffer(contents)) + line, _, err := reader.ReadLine() + fields := strings.Fields(string(line)) + + val, numerr := strconv.ParseFloat(fields[0], 64) + if numerr != nil { + return + } + uptime.Uptime = val + + return +} + +func getLoadAvgSample(procfile string) (samp LoadAvgSample) { + // read in whole loadavg file with cpu usage information ;"/proc/loadavg" + contents, err := ioutil.ReadFile(procfile) + samp.Time = time.Now() + if err != nil { + return + } + + reader := bufio.NewReader(bytes.NewBuffer(contents)) + line, _, err := reader.ReadLine() + fields := strings.Fields(string(line)) + for i := 0; i < 3; i++ { + val, numerr := strconv.ParseFloat(fields[i], 64) + if numerr != nil { + return + } + switch i { + case 0: + samp.One = val + case 1: + samp.Five = val + case 2: + samp.Fifteen = val + } + } + + return +} + +func getMemSample(procfile string) (samp MemSample) { + want := map[string]bool{ + "Buffers:": true, + "Cached:": true, + "MemTotal:": true, + "MemFree:": true, + "MemUsed:": true, + "SwapTotal:": true, + "SwapFree:": true, + "SwapUsed:": true} + + // read in whole meminfo 
file with cpu usage information ;"/proc/meminfo" + contents, err := ioutil.ReadFile(procfile) + samp.Time = time.Now() + if err != nil { + return + } + + reader := bufio.NewReader(bytes.NewBuffer(contents)) + for { + line, _, err := reader.ReadLine() + if err == io.EOF { + break + } + + fields := strings.Fields(string(line)) + fieldName := fields[0] + + _, ok := want[fieldName] + if ok && len(fields) == 3 { + val, numerr := strconv.ParseUint(fields[1], 10, 64) + if numerr != nil { + return + } + switch fieldName { + case "Buffers:": + samp.Buffers = val + case "Cached:": + samp.Cached = val + case "MemTotal:": + samp.MemTotal = val + case "MemFree:": + samp.MemFree = val + case "SwapTotal:": + samp.SwapTotal = val + case "SwapFree:": + samp.SwapFree = val + } + } + } + samp.MemUsed = samp.MemTotal - samp.MemFree + samp.SwapUsed = samp.SwapTotal - samp.SwapFree + return +} + +func getProcCPUSample() (s ProcCPUSample) { + var processInfo syscall.Rusage + syscall.Getrusage(syscall.RUSAGE_SELF, &processInfo) + + s.Time = time.Now() + s.ProcMemUsedK = int64(processInfo.Maxrss) + s.User = float64(processInfo.Utime.Usec)/1000000 + float64(processInfo.Utime.Sec) + s.System = float64(processInfo.Stime.Usec)/1000000 + float64(processInfo.Stime.Sec) + s.Total = s.User + s.System + + return +} + +func getCPUSample(procfile string) (samp CPUSample) { + // read in whole proc file with cpu usage information ; "/proc/stat" + contents, err := ioutil.ReadFile(procfile) + samp.Time = time.Now() + if err != nil { + return + } + + reader := bufio.NewReader(bytes.NewBuffer(contents)) + for { + line, _, err := reader.ReadLine() + if err == io.EOF { + break + } + + fields := strings.Fields(string(line)) + + if len(fields) > 0 { + fieldName := fields[0] + if fieldName == "cpu" { + parseCPUFields(fields, &samp) + } + } + } + return +} diff --git a/vendor/bitbucket.org/bertimus9/systemstat/utils.go b/vendor/bitbucket.org/bertimus9/systemstat/utils.go new file mode 100644 index 000000000000..201b97de235b --- /dev/null +++ b/vendor/bitbucket.org/bertimus9/systemstat/utils.go @@ -0,0 +1,90 @@ +package systemstat + +import ( + "log" + "runtime" + "strconv" +) + +func notImplemented(fn string) { + log.Printf("systemstat/%s is not implemented for this OS: %s\n", fn, runtime.GOOS) +} + +func getSimpleCPUAverage(first CPUSample, second CPUSample) (avg SimpleCPUAverage) { + //walltimediff := second.Time.Sub(first.Time) + //dT := float64(first.Total - second.Total) + + dI := float64(second.Idle - first.Idle) + dTot := float64(second.Total - first.Total) + avg.IdlePct = dI / dTot * 100 + avg.BusyPct = (dTot - dI) * 100 / dTot + //log.Printf("cpu idle ticks %f, total ticks %f, idle pct %f, busy pct %f\n", dI, dTot, avg.IdlePct, avg.BusyPct) + return +} + +func subtractAndConvertTicks(first uint64, second uint64) float64 { + return float64(first - second) +} + +func getCPUAverage(first CPUSample, second CPUSample) (avg CPUAverage) { + dTot := float64(second.Total - first.Total) + invQuotient := 100.00 / dTot + + avg.UserPct = subtractAndConvertTicks(second.User, first.User) * invQuotient + avg.NicePct = subtractAndConvertTicks(second.Nice, first.Nice) * invQuotient + avg.SystemPct = subtractAndConvertTicks(second.System, first.System) * invQuotient + avg.IdlePct = subtractAndConvertTicks(second.Idle, first.Idle) * invQuotient + avg.IowaitPct = subtractAndConvertTicks(second.Iowait, first.Iowait) * invQuotient + avg.IrqPct = subtractAndConvertTicks(second.Irq, first.Irq) * invQuotient + avg.SoftIrqPct = 
subtractAndConvertTicks(second.SoftIrq, first.SoftIrq) * invQuotient + avg.StealPct = subtractAndConvertTicks(second.Steal, first.Steal) * invQuotient + avg.GuestPct = subtractAndConvertTicks(second.Guest, first.Guest) * invQuotient + avg.Time = second.Time + avg.Seconds = second.Time.Sub(first.Time).Seconds() + return +} + +func getProcCPUAverage(first ProcCPUSample, second ProcCPUSample, procUptime float64) (avg ProcCPUAverage) { + dT := second.Time.Sub(first.Time).Seconds() + + avg.UserPct = 100 * (second.User - first.User) / dT + avg.SystemPct = 100 * (second.System - first.System) / dT + avg.TotalPct = 100 * (second.Total - first.Total) / dT + avg.PossiblePct = 100.0 * float64(runtime.NumCPU()) + avg.CumulativeTotalPct = 100 * second.Total / procUptime + avg.Time = second.Time + avg.Seconds = dT + return +} + +func parseCPUFields(fields []string, stat *CPUSample) { + numFields := len(fields) + stat.Name = fields[0] + for i := 1; i < numFields; i++ { + val, numerr := strconv.ParseUint(fields[i], 10, 64) + if numerr != nil { + log.Println("systemstat.parseCPUFields(): Error parsing (field, value): ", i, fields[i]) + } + stat.Total += val + switch i { + case 1: + stat.User = val + case 2: + stat.Nice = val + case 3: + stat.System = val + case 4: + stat.Idle = val + case 5: + stat.Iowait = val + case 6: + stat.Irq = val + case 7: + stat.SoftIrq = val + case 8: + stat.Steal = val + case 9: + stat.Guest = val + } + } +} diff --git a/vendor/github.com/codegangsta/negroni/.gitignore b/vendor/github.com/codegangsta/negroni/.gitignore new file mode 100644 index 000000000000..3f2bc47416e7 --- /dev/null +++ b/vendor/github.com/codegangsta/negroni/.gitignore @@ -0,0 +1 @@ +/coverage.txt diff --git a/vendor/github.com/codegangsta/negroni/.travis.yml b/vendor/github.com/codegangsta/negroni/.travis.yml new file mode 100644 index 000000000000..18104334d083 --- /dev/null +++ b/vendor/github.com/codegangsta/negroni/.travis.yml @@ -0,0 +1,27 @@ +language: go + +sudo: false +dist: trusty + +go: +- 1.x +- 1.2.x +- 1.3.x +- 1.4.x +- 1.5.x +- 1.6.x +- 1.7.x +- 1.8.x +- master + +before_install: +- find "${GOPATH%%:*}" -name '*.a' -delete +- rm -rf "${GOPATH%%:*}/src/golang.org" +- go get golang.org/x/tools/cover +- go get golang.org/x/tools/cmd/cover + +script: +- go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: +- bash <(curl -s "https://codecov.io/bash") diff --git a/vendor/github.com/codegangsta/negroni/BUILD b/vendor/github.com/codegangsta/negroni/BUILD new file mode 100644 index 000000000000..83409811c416 --- /dev/null +++ b/vendor/github.com/codegangsta/negroni/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "logger.go", + "negroni.go", + "recovery.go", + "response_writer.go", + "response_writer_pusher.go", + "static.go", + ], + importpath = "github.com/codegangsta/negroni", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/codegangsta/negroni/CHANGELOG.md b/vendor/github.com/codegangsta/negroni/CHANGELOG.md new file mode 100644 index 000000000000..f41043188bdd --- /dev/null +++ 
b/vendor/github.com/codegangsta/negroni/CHANGELOG.md
@@ -0,0 +1,64 @@
+# Change Log
+
+**ATTN**: This project uses [semantic versioning](http://semver.org/).
+
+## [Unreleased]
+
+## [0.3.0] - 2017-11-11
+### Added
+- `With()` helper for building a new `Negroni` struct chaining handlers from
+  existing `Negroni` structs
+- Format log output in `Logger` middleware via a configurable `text/template`
+  string injectable via `.SetFormat`. Added `LoggerDefaultFormat` and
+  `LoggerDefaultDateFormat` to configure the default template and date format
+  used by the `Logger` middleware.
+- Support for HTTP/2 server push via the `http.Pusher` interface for Go 1.8+.
+- `WrapFunc` to convert `http.HandlerFunc` into a `negroni.Handler`
+- `Formatter` field added to `Recovery` middleware to allow configuring how
+  `panic`s are output. Defaults to `TextFormatter` (how it was output in
+  `0.2.0`). `HTMLPanicFormatter` also added to allow easy outputting of
+  `panic`s as HTML.
+
+### Fixed
+- `Written()` correctly returns `false` if no response header has been written
+- Only implement `http.CloseNotifier` with the `negroni.ResponseWriter` if the
+  underlying `http.ResponseWriter` implements it (previously it would always
+  implement it and panic if the underlying `http.ResponseWriter` did not).
+
+### Changed
+- Set default status to `0` in the case that no handler writes status -- was
+  previously `200` (in 0.2.0, before that it was `0` so this reestablishes that
+  behavior)
+- Catch `panic`s thrown by callbacks provided to the `Recovery` handler
+- Recovery middleware will set `text/plain` content-type if none is set
+- `ALogger` interface to allow custom logger outputs to be used with the
+  `Logger` middleware. Changes embedded field in `negroni.Logger` from `Logger`
+  to `ALogger`.
+- Default `Logger` middleware output changed to be more structured and verbose
+  (also now configurable, see `Added`)
+- Automatically bind to port specified in `$PORT` in `.Run()` if an address is
+  not passed in. Fall back to binding to `:8080` if no address specified
+  (configurable via `DefaultAddress`).
+- `PanicHandlerFunc` added to `Recovery` middleware to enhance custom handling
+  of `panic`s by providing additional information to the handler including the
+  stack and the `http.Request`. `Recovery.ErrorHandlerFunc` was also added, but
+  deprecated in favor of the new `PanicHandlerFunc`.
+
+## [0.2.0] - 2016-05-10
+### Added
+- Support for variadic handlers in `New()`
+- Added `Negroni.Handlers()` to fetch all of the handlers for a given chain
+- Allowed size in `Recovery` handler was bumped to 8k
+- `Negroni.UseFunc` to push another handler onto the chain
+
+### Changed
+- Set the status before calling `beforeFuncs` so the information is available to them
+- Set default status to `200` in the case that no handler writes status -- was previously `0`
+- Panic if `nil` handler is given to `negroni.Use`
+
+## 0.1.0 - 2013-07-22
+### Added
+- Initial implementation.
+ +[Unreleased]: https://github.com/urfave/negroni/compare/v0.2.0...HEAD +[0.2.0]: https://github.com/urfave/negroni/compare/v0.1.0...v0.2.0 diff --git a/vendor/github.com/codegangsta/negroni/LICENSE b/vendor/github.com/codegangsta/negroni/LICENSE new file mode 100644 index 000000000000..08b5e20ac47d --- /dev/null +++ b/vendor/github.com/codegangsta/negroni/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Jeremy Saenz + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/codegangsta/negroni/README.md b/vendor/github.com/codegangsta/negroni/README.md new file mode 100644 index 000000000000..8c2e9d85990f --- /dev/null +++ b/vendor/github.com/codegangsta/negroni/README.md @@ -0,0 +1,546 @@ +# Negroni +[![GoDoc](https://godoc.org/github.com/urfave/negroni?status.svg)](http://godoc.org/github.com/urfave/negroni) +[![Build Status](https://travis-ci.org/urfave/negroni.svg?branch=master)](https://travis-ci.org/urfave/negroni) +[![codebeat](https://codebeat.co/badges/47d320b1-209e-45e8-bd99-9094bc5111e2)](https://codebeat.co/projects/github.aaakk.us.kg-urfave-negroni) +[![codecov](https://codecov.io/gh/urfave/negroni/branch/master/graph/badge.svg)](https://codecov.io/gh/urfave/negroni) + +**Notice:** This is the library formerly known as +`github.com/codegangsta/negroni` -- Github will automatically redirect requests +to this repository, but we recommend updating your references for clarity. + +Negroni is an idiomatic approach to web middleware in Go. It is tiny, +non-intrusive, and encourages use of `net/http` Handlers. + +If you like the idea of [Martini](https://github.com/go-martini/martini), but +you think it contains too much magic, then Negroni is a great fit. + +Language Translations: +* [Deutsch (de_DE)](translations/README_de_de.md) +* [Português Brasileiro (pt_BR)](translations/README_pt_br.md) +* [简体中文 (zh_cn)](translations/README_zh_CN.md) +* [繁體中文 (zh_tw)](translations/README_zh_tw.md) +* [日本語 (ja_JP)](translations/README_ja_JP.md) +* [Français (fr_FR)](translations/README_fr_FR.md) + +## Getting Started + +After installing Go and setting up your +[GOPATH](http://golang.org/doc/code.html#GOPATH), create your first `.go` file. +We'll call it `server.go`. 
+ + +``` go +package main + +import ( + "fmt" + "net/http" + + "github.com/urfave/negroni" +) + +func main() { + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "Welcome to the home page!") + }) + + n := negroni.Classic() // Includes some default middlewares + n.UseHandler(mux) + + http.ListenAndServe(":3000", n) +} +``` + +Then install the Negroni package (**NOTE**: >= **go 1.1** is required): + +``` +go get github.com/urfave/negroni +``` + +Then run your server: + +``` +go run server.go +``` + +You will now have a Go `net/http` webserver running on `localhost:3000`. + +### Packaging + +If you are on Debian, `negroni` is also available as [a +package](https://packages.debian.org/sid/golang-github-urfave-negroni-dev) that +you can install via `apt install golang-github-urfave-negroni-dev` (at the time +of writing, it is in the `sid` repositories). + +## Is Negroni a Framework? + +Negroni is **not** a framework. It is a middleware-focused library that is +designed to work directly with `net/http`. + +## Routing? + +Negroni is BYOR (Bring your own Router). The Go community already has a number +of great http routers available, and Negroni tries to play well with all of them +by fully supporting `net/http`. For instance, integrating with [Gorilla Mux] +looks like so: + +``` go +router := mux.NewRouter() +router.HandleFunc("/", HomeHandler) + +n := negroni.New(Middleware1, Middleware2) +// Or use a middleware with the Use() function +n.Use(Middleware3) +// router goes last +n.UseHandler(router) + +http.ListenAndServe(":3001", n) +``` + +## `negroni.Classic()` + +`negroni.Classic()` provides some default middleware that is useful for most +applications: + +* [`negroni.Recovery`](#recovery) - Panic Recovery Middleware. +* [`negroni.Logger`](#logger) - Request/Response Logger Middleware. +* [`negroni.Static`](#static) - Static File serving under the "public" + directory. + +This makes it really easy to get started with some useful features from Negroni. + +## Handlers + +Negroni provides a bidirectional middleware flow. This is done through the +`negroni.Handler` interface: + +``` go +type Handler interface { + ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) +} +``` + +If a middleware hasn't already written to the `ResponseWriter`, it should call +the next `http.HandlerFunc` in the chain to yield to the next middleware +handler. This can be used for great good: + +``` go +func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + // do some stuff before + next(rw, r) + // do some stuff after +} +``` + +And you can map it to the handler chain with the `Use` function: + +``` go +n := negroni.New() +n.Use(negroni.HandlerFunc(MyMiddleware)) +``` + +You can also map plain old `http.Handler`s: + +``` go +n := negroni.New() + +mux := http.NewServeMux() +// map your routes + +n.UseHandler(mux) + +http.ListenAndServe(":3000", n) +``` + +## `With()` + +Negroni has a convenience function called `With`. `With` takes one or more +`Handler` instances and returns a new `Negroni` with the combination of the +receiver's handlers and the new handlers. 
+
+```go
+// middleware we want to reuse
+common := negroni.New()
+common.Use(MyMiddleware1)
+common.Use(MyMiddleware2)
+
+// `specific` is a new negroni with the handlers from `common` combined with
+// the handlers passed in
+specific := common.With(
+	SpecificMiddleware1,
+	SpecificMiddleware2,
+)
+```
+
+## `Run()`
+
+Negroni has a convenience function called `Run`. `Run` takes an addr string
+identical to [`http.ListenAndServe`](https://godoc.org/net/http#ListenAndServe).
+
+
+``` go
+package main
+
+import (
+	"github.com/urfave/negroni"
+)
+
+func main() {
+	n := negroni.Classic()
+	n.Run(":8080")
+}
+```
+If no address is provided, the `PORT` environment variable is used instead.
+If the `PORT` environment variable is not defined, the default address will be used.
+See [Run](https://godoc.org/github.com/urfave/negroni#Negroni.Run) for a complete description.
+
+In general, you will want to use `net/http` methods and pass `negroni` as a
+`Handler`, as this is more flexible, e.g.:
+
+
+``` go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/urfave/negroni"
+)
+
+func main() {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+		fmt.Fprintf(w, "Welcome to the home page!")
+	})
+
+	n := negroni.Classic() // Includes some default middlewares
+	n.UseHandler(mux)
+
+	s := &http.Server{
+		Addr:           ":8080",
+		Handler:        n,
+		ReadTimeout:    10 * time.Second,
+		WriteTimeout:   10 * time.Second,
+		MaxHeaderBytes: 1 << 20,
+	}
+	log.Fatal(s.ListenAndServe())
+}
+```
+
+## Route Specific Middleware
+
+If you have a group of routes that need specific middleware to be executed,
+you can simply create a new Negroni instance and use it as your route handler.
+
+``` go
+router := mux.NewRouter()
+adminRoutes := mux.NewRouter()
+// add admin routes here
+
+// Create a new negroni for the admin middleware
+router.PathPrefix("/admin").Handler(negroni.New(
+	Middleware1,
+	Middleware2,
+	negroni.Wrap(adminRoutes),
+))
+```
+
+If you are using [Gorilla Mux], here is an example using a subrouter:
+
+``` go
+router := mux.NewRouter()
+subRouter := mux.NewRouter().PathPrefix("/subpath").Subrouter().StrictSlash(true)
+subRouter.HandleFunc("/", someSubpathHandler) // "/subpath/"
+subRouter.HandleFunc("/:id", someSubpathHandler) // "/subpath/:id"
+
+// "/subpath" is necessary to ensure the subRouter and main router link up
+router.PathPrefix("/subpath").Handler(negroni.New(
+	Middleware1,
+	Middleware2,
+	negroni.Wrap(subRouter),
+))
+```
+
+`With()` can be used to eliminate redundancy for middlewares shared across
+routes.
+
+``` go
+router := mux.NewRouter()
+apiRoutes := mux.NewRouter()
+// add api routes here
+webRoutes := mux.NewRouter()
+// add web routes here
+
+// create common middleware to be shared across routes
+common := negroni.New(
+	Middleware1,
+	Middleware2,
+)
+
+// create a new negroni for the api middleware
+// using the common middleware as a base
+router.PathPrefix("/api").Handler(common.With(
+	APIMiddleware1,
+	negroni.Wrap(apiRoutes),
+))
+// create a new negroni for the web middleware
+// using the common middleware as a base
+router.PathPrefix("/web").Handler(common.With(
+	WebMiddleware1,
+	negroni.Wrap(webRoutes),
+))
+```
+
+## Bundled Middleware
+
+### Static
+
+This middleware will serve files on the filesystem. If the files do not exist,
+it proxies the request to the next middleware.
+If you want the requests for non-existent files to return a
+`404 File Not Found` to the user, you should look at using
+[http.FileServer](https://golang.org/pkg/net/http/#FileServer) as a handler.
+
+Example:
+
+
+``` go
+package main
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/urfave/negroni"
+)
+
+func main() {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+		fmt.Fprintf(w, "Welcome to the home page!")
+	})
+
+	// Example of using a http.FileServer if you want "server-like" rather than "middleware" behavior
+	// mux.Handle("/public", http.FileServer(http.Dir("/home/public")))
+
+	n := negroni.New()
+	n.Use(negroni.NewStatic(http.Dir("/tmp")))
+	n.UseHandler(mux)
+
+	http.ListenAndServe(":3002", n)
+}
+```
+
+Will serve files from the `/tmp` directory first, but proxy calls to the next
+handler if the request does not match a file on the filesystem.
+
+### Recovery
+
+This middleware catches `panic`s and responds with a `500` response code. If
+any other middleware has written a response code or body, this middleware will
+fail to properly send a 500 to the client, as the client has already received
+the HTTP response code. Additionally, a `PanicHandlerFunc` can be attached
+to report 500s to an error reporting service such as Sentry or Airbrake.
+
+Example:
+
+
+``` go
+package main
+
+import (
+	"net/http"
+
+	"github.com/urfave/negroni"
+)
+
+func main() {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+		panic("oh no")
+	})
+
+	n := negroni.New()
+	n.Use(negroni.NewRecovery())
+	n.UseHandler(mux)
+
+	http.ListenAndServe(":3003", n)
+}
+```
+
+Will return a `500 Internal Server Error` to each request. It will also log the
+stack traces as well as print the stack trace to the requester if `PrintStack`
+is set to `true` (the default).
+
+Example with error handler:
+
+``` go
+package main
+
+import (
+	"net/http"
+
+	"github.com/urfave/negroni"
+)
+
+func main() {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+		panic("oh no")
+	})
+
+	n := negroni.New()
+	recovery := negroni.NewRecovery()
+	recovery.PanicHandlerFunc = reportToSentry
+	n.Use(recovery)
+	n.UseHandler(mux)
+
+	http.ListenAndServe(":3003", n)
+}
+
+func reportToSentry(info *negroni.PanicInformation) {
+	// write code here to report error to Sentry
+}
+```
+
+The middleware simply outputs the information on STDOUT by default.
+You can customize the output process by using the `SetFormatter()` function.
+
+You can also use the `HTMLPanicFormatter` to display a pretty HTML page when a crash occurs.
+
+
+``` go
+package main
+
+import (
+	"net/http"
+
+	"github.com/urfave/negroni"
+)
+
+func main() {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+		panic("oh no")
+	})
+
+	n := negroni.New()
+	recovery := negroni.NewRecovery()
+	recovery.Formatter = &negroni.HTMLPanicFormatter{}
+	n.Use(recovery)
+	n.UseHandler(mux)
+
+	http.ListenAndServe(":3003", n)
+}
+```
+
+## Logger
+
+This middleware logs each incoming request and response.
+
+Example:
+
+
+``` go
+package main
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/urfave/negroni"
+)
+
+func main() {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+		fmt.Fprintf(w, "Welcome to the home page!")
+	})
+
+	n := negroni.New()
+	n.Use(negroni.NewLogger())
+	n.UseHandler(mux)
+
+	http.ListenAndServe(":3004", n)
+}
+```
+
+Will print a log similar to:
+
+```
+[negroni] 2017-10-04T14:56:25+02:00 | 200 | 378µs | localhost:3004 | GET /
+```
+
+on each request.
+
+You can also set your own log format by calling the `SetFormat` function. The
+format is a template string that can reference the fields of the `LoggerEntry`
+struct. For example:
+
+```go
+l.SetFormat("[{{.Status}} {{.Duration}}] - {{.Request.UserAgent}}")
+```
+
+will show something like `[200 18.263µs] - Go-User-Agent/1.1`
+
+## Third Party Middleware
+
+Here is a current list of Negroni compatible middleware. Feel free to put up a PR
+linking your middleware if you have built one:
+
+| Middleware | Author | Description |
+| -----------|--------|-------------|
+| [authz](https://github.com/casbin/negroni-authz) | [Yang Luo](https://github.com/hsluoyz) | ACL, RBAC, ABAC Authorization middleware based on [Casbin](https://github.com/casbin/casbin) |
+| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | Data binding from HTTP requests into structs |
+| [cloudwatch](https://github.com/cvillecsteele/negroni-cloudwatch) | [Colin Steele](https://github.com/cvillecsteele) | AWS cloudwatch metrics middleware |
+| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support |
+| [csp](https://github.com/awakenetworks/csp) | [Awake Networks](https://github.com/awakenetworks) | [Content Security Policy](https://www.w3.org/TR/CSP2/) (CSP) support |
+| [delay](https://github.com/jeffbmartinez/delay) | [Jeff Martinez](https://github.com/jeffbmartinez) | Add delays/latency to endpoints.
Useful when testing effects of high latency | +| [New Relic Go Agent](https://github.com/yadvendar/negroni-newrelic-go-agent) | [Yadvendar Champawat](https://github.com/yadvendar) | Official [New Relic Go Agent](https://github.com/newrelic/go-agent) (currently in beta) | +| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen Ou](https://github.com/jingweno) | New Relic agent for Go runtime | +| [Graceful](https://github.com/tylerb/graceful) | [Tyler Bunnell](https://github.com/tylerb) | Graceful HTTP Shutdown | +| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | GZIP response compression | +| [JWT Middleware](https://github.com/auth0/go-jwt-middleware) | [Auth0](https://github.com/auth0) | Middleware checks for a JWT on the `Authorization` header on incoming requests and decodes it| +| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | Logrus-based logger | +| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | oAuth2 middleware | +| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | Generate TinySVG, HTML and CSS on the fly | +| [permissions2](https://github.com/xyproto/permissions2) | [Alexander Rødseth](https://github.com/xyproto) | Cookies, users and permissions | +| [prometheus](https://github.com/zbindenren/negroni-prometheus) | [Rene Zbinden](https://github.com/zbindenren) | Easily create metrics endpoint for the [prometheus](http://prometheus.io) instrumentation tool | +| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | Render JSON, XML and HTML templates | +| [RestGate](https://github.com/pjebs/restgate) | [Prasanga Siripala](https://github.com/pjebs) | Secure authentication for REST API endpoints | +| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) | Middleware that implements a few quick security wins | +| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Session Management | +| [stats](https://github.com/thoas/stats) | [Florent Messa](https://github.com/thoas) | Store information about your web application (response time, etc.) 
| +| [VanGoH](https://github.com/auroratechnologies/vangoh) | [Taylor Wrobel](https://github.com/twrobel3) | Configurable [AWS-Style](http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) HMAC authentication middleware | +| [xrequestid](https://github.com/pilu/xrequestid) | [Andrea Franz](https://github.com/pilu) | Middleware that assigns a random X-Request-Id header to each request | +| [mgo session](https://github.com/joeljames/nigroni-mgo-session) | [Joel James](https://github.com/joeljames) | Middleware that handles creating and closing mgo sessions per request | +| [digits](https://github.com/bamarni/digits) | [Bilal Amarni](https://github.com/bamarni) | Middleware that handles [Twitter Digits](https://get.digits.com/) authentication | + +## Examples + +[Alexander Rødseth](https://github.com/xyproto) created +[mooseware](https://github.com/xyproto/mooseware), a skeleton for writing a +Negroni middleware handler. + +[Prasanga Siripala](https://github.com/pjebs) created an effective skeleton structure for web-based Go/Negroni projects: [Go-Skeleton](https://github.com/pjebs/go-skeleton) + +## Live code reload? + +[gin](https://github.com/codegangsta/gin) and +[fresh](https://github.com/pilu/fresh) both live reload negroni apps. + +## Essential Reading for Beginners of Go & Negroni + +* [Using a Context to pass information from middleware to end handler](http://elithrar.github.io/article/map-string-interface/) +* [Understanding middleware](https://mattstauffer.co/blog/laravel-5.0-middleware-filter-style) + +## About + +Negroni is obsessively designed by none other than the [Code +Gangsta](https://codegangsta.io/) + +[Gorilla Mux]: https://github.com/gorilla/mux +[`http.FileSystem`]: https://godoc.org/net/http#FileSystem diff --git a/vendor/github.com/codegangsta/negroni/doc.go b/vendor/github.com/codegangsta/negroni/doc.go new file mode 100644 index 000000000000..add1ed9f713b --- /dev/null +++ b/vendor/github.com/codegangsta/negroni/doc.go @@ -0,0 +1,25 @@ +// Package negroni is an idiomatic approach to web middleware in Go. It is tiny, non-intrusive, and encourages use of net/http Handlers. +// +// If you like the idea of Martini, but you think it contains too much magic, then Negroni is a great fit. +// +// For a full guide visit http://github.com/urfave/negroni +// +// package main +// +// import ( +// "github.com/urfave/negroni" +// "net/http" +// "fmt" +// ) +// +// func main() { +// mux := http.NewServeMux() +// mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { +// fmt.Fprintf(w, "Welcome to the home page!") +// }) +// +// n := negroni.Classic() +// n.UseHandler(mux) +// n.Run(":3000") +// } +package negroni diff --git a/vendor/github.com/codegangsta/negroni/logger.go b/vendor/github.com/codegangsta/negroni/logger.go new file mode 100644 index 000000000000..5bf7731ce7c1 --- /dev/null +++ b/vendor/github.com/codegangsta/negroni/logger.go @@ -0,0 +1,82 @@ +package negroni + +import ( + "bytes" + + "log" + "net/http" + "os" + "text/template" + "time" +) + +// LoggerEntry is the structure +// passed to the template. +type LoggerEntry struct { + StartTime string + Status int + Duration time.Duration + Hostname string + Method string + Path string + Request *http.Request +} + +// LoggerDefaultFormat is the format +// logged used by the default Logger instance. 
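+// The format string is parsed with text/template; the fields of LoggerEntry are available inside the template.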
+var LoggerDefaultFormat = "{{.StartTime}} | {{.Status}} | \t {{.Duration}} | {{.Hostname}} | {{.Method}} {{.Path}} \n" + +// LoggerDefaultDateFormat is the +// format used for date by the +// default Logger instance. +var LoggerDefaultDateFormat = time.RFC3339 + +// ALogger interface +type ALogger interface { + Println(v ...interface{}) + Printf(format string, v ...interface{}) +} + +// Logger is a middleware handler that logs the request as it goes in and the response as it goes out. +type Logger struct { + // ALogger implements just enough log.Logger interface to be compatible with other implementations + ALogger + dateFormat string + template *template.Template +} + +// NewLogger returns a new Logger instance +func NewLogger() *Logger { + logger := &Logger{ALogger: log.New(os.Stdout, "[negroni] ", 0), dateFormat: LoggerDefaultDateFormat} + logger.SetFormat(LoggerDefaultFormat) + return logger +} + +func (l *Logger) SetFormat(format string) { + l.template = template.Must(template.New("negroni_parser").Parse(format)) +} + +func (l *Logger) SetDateFormat(format string) { + l.dateFormat = format +} + +func (l *Logger) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + start := time.Now() + + next(rw, r) + + res := rw.(ResponseWriter) + log := LoggerEntry{ + StartTime: start.Format(l.dateFormat), + Status: res.Status(), + Duration: time.Since(start), + Hostname: r.Host, + Method: r.Method, + Path: r.URL.Path, + Request: r, + } + + buff := &bytes.Buffer{} + l.template.Execute(buff, log) + l.Printf(buff.String()) +} diff --git a/vendor/github.com/codegangsta/negroni/negroni.go b/vendor/github.com/codegangsta/negroni/negroni.go new file mode 100644 index 000000000000..d1d77820a0c3 --- /dev/null +++ b/vendor/github.com/codegangsta/negroni/negroni.go @@ -0,0 +1,169 @@ +package negroni + +import ( + "log" + "net/http" + "os" +) + +const ( + // DefaultAddress is used if no other is specified. + DefaultAddress = ":8080" +) + +// Handler handler is an interface that objects can implement to be registered to serve as middleware +// in the Negroni middleware stack. +// ServeHTTP should yield to the next middleware in the chain by invoking the next http.HandlerFunc +// passed in. +// +// If the Handler writes to the ResponseWriter, the next http.HandlerFunc should not be invoked. +type Handler interface { + ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) +} + +// HandlerFunc is an adapter to allow the use of ordinary functions as Negroni handlers. +// If f is a function with the appropriate signature, HandlerFunc(f) is a Handler object that calls f. +type HandlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) + +func (h HandlerFunc) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + h(rw, r, next) +} + +type middleware struct { + handler Handler + next *middleware +} + +func (m middleware) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + m.handler.ServeHTTP(rw, r, m.next.ServeHTTP) +} + +// Wrap converts a http.Handler into a negroni.Handler so it can be used as a Negroni +// middleware. The next http.HandlerFunc is automatically called after the Handler +// is executed. +func Wrap(handler http.Handler) Handler { + return HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + handler.ServeHTTP(rw, r) + next(rw, r) + }) +} + +// WrapFunc converts a http.HandlerFunc into a negroni.Handler so it can be used as a Negroni +// middleware. 
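+// Unlike Wrap, it adapts a bare http.HandlerFunc rather than a full http.Handler.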
The next http.HandlerFunc is automatically called after the Handler +// is executed. +func WrapFunc(handlerFunc http.HandlerFunc) Handler { + return HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + handlerFunc(rw, r) + next(rw, r) + }) +} + +// Negroni is a stack of Middleware Handlers that can be invoked as an http.Handler. +// Negroni middleware is evaluated in the order that they are added to the stack using +// the Use and UseHandler methods. +type Negroni struct { + middleware middleware + handlers []Handler +} + +// New returns a new Negroni instance with no middleware preconfigured. +func New(handlers ...Handler) *Negroni { + return &Negroni{ + handlers: handlers, + middleware: build(handlers), + } +} + +// With returns a new Negroni instance that is a combination of the negroni +// receiver's handlers and the provided handlers. +func (n *Negroni) With(handlers ...Handler) *Negroni { + return New( + append(n.handlers, handlers...)..., + ) +} + +// Classic returns a new Negroni instance with the default middleware already +// in the stack. +// +// Recovery - Panic Recovery Middleware +// Logger - Request/Response Logging +// Static - Static File Serving +func Classic() *Negroni { + return New(NewRecovery(), NewLogger(), NewStatic(http.Dir("public"))) +} + +func (n *Negroni) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + n.middleware.ServeHTTP(NewResponseWriter(rw), r) +} + +// Use adds a Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni. +func (n *Negroni) Use(handler Handler) { + if handler == nil { + panic("handler cannot be nil") + } + + n.handlers = append(n.handlers, handler) + n.middleware = build(n.handlers) +} + +// UseFunc adds a Negroni-style handler function onto the middleware stack. +func (n *Negroni) UseFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)) { + n.Use(HandlerFunc(handlerFunc)) +} + +// UseHandler adds a http.Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni. +func (n *Negroni) UseHandler(handler http.Handler) { + n.Use(Wrap(handler)) +} + +// UseHandlerFunc adds a http.HandlerFunc-style handler function onto the middleware stack. +func (n *Negroni) UseHandlerFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request)) { + n.UseHandler(http.HandlerFunc(handlerFunc)) +} + +// Run is a convenience function that runs the negroni stack as an HTTP +// server. The addr string, if provided, takes the same format as http.ListenAndServe. +// If no address is provided but the PORT environment variable is set, the PORT value is used. +// If neither is provided, the address' value will equal the DefaultAddress constant. +func (n *Negroni) Run(addr ...string) { + l := log.New(os.Stdout, "[negroni] ", 0) + finalAddr := detectAddress(addr...) + l.Printf("listening on %s", finalAddr) + l.Fatal(http.ListenAndServe(finalAddr, n)) +} + +func detectAddress(addr ...string) string { + if len(addr) > 0 { + return addr[0] + } + if port := os.Getenv("PORT"); port != "" { + return ":" + port + } + return DefaultAddress +} + +// Returns a list of all the handlers in the current Negroni middleware chain. 
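+// The handlers are returned in the order they were added to the chain.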
+func (n *Negroni) Handlers() []Handler {
+	return n.handlers
+}
+
+func build(handlers []Handler) middleware {
+	var next middleware
+
+	if len(handlers) == 0 {
+		return voidMiddleware()
+	} else if len(handlers) > 1 {
+		next = build(handlers[1:])
+	} else {
+		next = voidMiddleware()
+	}
+
+	return middleware{handlers[0], &next}
+}
+
+func voidMiddleware() middleware {
+	return middleware{
+		HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {}),
+		&middleware{},
+	}
+}
diff --git a/vendor/github.com/codegangsta/negroni/recovery.go b/vendor/github.com/codegangsta/negroni/recovery.go
new file mode 100644
index 000000000000..c6fc24ef51e6
--- /dev/null
+++ b/vendor/github.com/codegangsta/negroni/recovery.go
@@ -0,0 +1,194 @@
+package negroni
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"os"
+	"runtime"
+	"runtime/debug"
+	"text/template"
+)
+
+const (
+	panicText = "PANIC: %s\n%s"
+	panicHTML = `<html>
+<head><title>PANIC: {{.RecoveredPanic}}</title></head>

+<body>
+<h1>Negroni - PANIC</h1>
+
+<pre>{{.RequestDescription}}</pre>
+
+<pre>Runtime error: {{.RecoveredPanic}}</pre>
+
+{{ if .Stack }}
+<h3>Runtime Stack</h3>
+
+<pre>{{.StackAsString}}</pre>
+{{ end }}
+</body>
+</html>`
+	nilRequestMessage = "Request is nil"
+)
+
+var panicHTMLTemplate = template.Must(template.New("PanicPage").Parse(panicHTML))
+
+// PanicInformation contains all
+// elements for printing stack information.
+type PanicInformation struct {
+	RecoveredPanic interface{}
+	Stack          []byte
+	Request        *http.Request
+}
+
+// StackAsString returns a printable version of the stack
+func (p *PanicInformation) StackAsString() string {
+	return string(p.Stack)
+}
+
+// RequestDescription returns a printable description of the url
+func (p *PanicInformation) RequestDescription() string {
+
+	if p.Request == nil {
+		return nilRequestMessage
+	}
+
+	var queryOutput string
+	if p.Request.URL.RawQuery != "" {
+		queryOutput = "?" + p.Request.URL.RawQuery
+	}
+	return fmt.Sprintf("%s %s%s", p.Request.Method, p.Request.URL.Path, queryOutput)
+}
+
+// PanicFormatter is an interface an object can implement
+// to be able to output the stack trace
+type PanicFormatter interface {
+	// FormatPanicError outputs the stack for a given answer/response.
+	// In case the middleware should not output the stack trace,
+	// the field `Stack` of the passed `PanicInformation` instance equals `[]byte{}`.
+	FormatPanicError(rw http.ResponseWriter, r *http.Request, infos *PanicInformation)
+}
+
+// TextPanicFormatter outputs the stack
+// as simple text on os.Stdout. If no `Content-Type` is set,
+// it will output the data as `text/plain; charset=utf-8`.
+// Otherwise, the original `Content-Type` is kept.
+type TextPanicFormatter struct{}
+
+func (t *TextPanicFormatter) FormatPanicError(rw http.ResponseWriter, r *http.Request, infos *PanicInformation) {
+	if rw.Header().Get("Content-Type") == "" {
+		rw.Header().Set("Content-Type", "text/plain; charset=utf-8")
+	}
+	fmt.Fprintf(rw, panicText, infos.RecoveredPanic, infos.Stack)
+}
+
+// HTMLPanicFormatter outputs the stack inside
+// an HTML page. This has been largely inspired by
+// https://github.com/go-martini/martini/pull/156/commits.
+type HTMLPanicFormatter struct{}
+
+func (t *HTMLPanicFormatter) FormatPanicError(rw http.ResponseWriter, r *http.Request, infos *PanicInformation) {
+	if rw.Header().Get("Content-Type") == "" {
+		rw.Header().Set("Content-Type", "text/html; charset=utf-8")
+	}
+	panicHTMLTemplate.Execute(rw, infos)
+}
+
+// Recovery is a Negroni middleware that recovers from any panics and writes a 500 if there was one.
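+// The response body is rendered by the configured Formatter (a TextPanicFormatter by default).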
+type Recovery struct { + Logger ALogger + PrintStack bool + PanicHandlerFunc func(*PanicInformation) + StackAll bool + StackSize int + Formatter PanicFormatter + + // Deprecated: Use PanicHandlerFunc instead to receive panic + // error with additional information (see PanicInformation) + ErrorHandlerFunc func(interface{}) +} + +// NewRecovery returns a new instance of Recovery +func NewRecovery() *Recovery { + return &Recovery{ + Logger: log.New(os.Stdout, "[negroni] ", 0), + PrintStack: true, + StackAll: false, + StackSize: 1024 * 8, + Formatter: &TextPanicFormatter{}, + } +} + +func (rec *Recovery) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + defer func() { + if err := recover(); err != nil { + rw.WriteHeader(http.StatusInternalServerError) + + stack := make([]byte, rec.StackSize) + stack = stack[:runtime.Stack(stack, rec.StackAll)] + infos := &PanicInformation{RecoveredPanic: err, Request: r} + + if rec.PrintStack { + infos.Stack = stack + } + rec.Logger.Printf(panicText, err, stack) + rec.Formatter.FormatPanicError(rw, r, infos) + + if rec.ErrorHandlerFunc != nil { + func() { + defer func() { + if err := recover(); err != nil { + rec.Logger.Printf("provided ErrorHandlerFunc panic'd: %s, trace:\n%s", err, debug.Stack()) + rec.Logger.Printf("%s\n", debug.Stack()) + } + }() + rec.ErrorHandlerFunc(err) + }() + } + if rec.PanicHandlerFunc != nil { + func() { + defer func() { + if err := recover(); err != nil { + rec.Logger.Printf("provided PanicHandlerFunc panic'd: %s, trace:\n%s", err, debug.Stack()) + rec.Logger.Printf("%s\n", debug.Stack()) + } + }() + rec.PanicHandlerFunc(infos) + }() + } + } + }() + + next(rw, r) +} diff --git a/vendor/github.com/codegangsta/negroni/response_writer.go b/vendor/github.com/codegangsta/negroni/response_writer.go new file mode 100644 index 000000000000..cc507eb4b757 --- /dev/null +++ b/vendor/github.com/codegangsta/negroni/response_writer.go @@ -0,0 +1,113 @@ +package negroni + +import ( + "bufio" + "fmt" + "net" + "net/http" +) + +// ResponseWriter is a wrapper around http.ResponseWriter that provides extra information about +// the response. It is recommended that middleware handlers use this construct to wrap a responsewriter +// if the functionality calls for it. +type ResponseWriter interface { + http.ResponseWriter + http.Flusher + // Status returns the status code of the response or 0 if the response has + // not been written + Status() int + // Written returns whether or not the ResponseWriter has been written. + Written() bool + // Size returns the size of the response body. + Size() int + // Before allows for a function to be called before the ResponseWriter has been written to. This is + // useful for setting headers or any other operations that must happen before a response has been written. 
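+	// Before functions are invoked in reverse (LIFO) order of registration.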
+ Before(func(ResponseWriter)) +} + +type beforeFunc func(ResponseWriter) + +// NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter +func NewResponseWriter(rw http.ResponseWriter) ResponseWriter { + nrw := &responseWriter{ + ResponseWriter: rw, + } + + if _, ok := rw.(http.CloseNotifier); ok { + return &responseWriterCloseNotifer{nrw} + } + + return nrw +} + +type responseWriter struct { + http.ResponseWriter + status int + size int + beforeFuncs []beforeFunc +} + +func (rw *responseWriter) WriteHeader(s int) { + rw.status = s + rw.callBefore() + rw.ResponseWriter.WriteHeader(s) +} + +func (rw *responseWriter) Write(b []byte) (int, error) { + if !rw.Written() { + // The status will be StatusOK if WriteHeader has not been called yet + rw.WriteHeader(http.StatusOK) + } + size, err := rw.ResponseWriter.Write(b) + rw.size += size + return size, err +} + +func (rw *responseWriter) Status() int { + return rw.status +} + +func (rw *responseWriter) Size() int { + return rw.size +} + +func (rw *responseWriter) Written() bool { + return rw.status != 0 +} + +func (rw *responseWriter) Before(before func(ResponseWriter)) { + rw.beforeFuncs = append(rw.beforeFuncs, before) +} + +func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker, ok := rw.ResponseWriter.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface") + } + return hijacker.Hijack() +} + +func (rw *responseWriter) callBefore() { + for i := len(rw.beforeFuncs) - 1; i >= 0; i-- { + rw.beforeFuncs[i](rw) + } +} + +func (rw *responseWriter) Flush() { + flusher, ok := rw.ResponseWriter.(http.Flusher) + if ok { + if !rw.Written() { + // The status will be StatusOK if WriteHeader has not been called yet + rw.WriteHeader(http.StatusOK) + } + flusher.Flush() + } +} + +type responseWriterCloseNotifer struct { + *responseWriter +} + +func (rw *responseWriterCloseNotifer) CloseNotify() <-chan bool { + return rw.ResponseWriter.(http.CloseNotifier).CloseNotify() +} diff --git a/vendor/github.com/codegangsta/negroni/response_writer_pusher.go b/vendor/github.com/codegangsta/negroni/response_writer_pusher.go new file mode 100644 index 000000000000..213cb35f7c70 --- /dev/null +++ b/vendor/github.com/codegangsta/negroni/response_writer_pusher.go @@ -0,0 +1,16 @@ +//+build go1.8 + +package negroni + +import ( + "fmt" + "net/http" +) + +func (rw *responseWriter) Push(target string, opts *http.PushOptions) error { + pusher, ok := rw.ResponseWriter.(http.Pusher) + if ok { + return pusher.Push(target, opts) + } + return fmt.Errorf("the ResponseWriter doesn't support the Pusher interface") +} diff --git a/vendor/github.com/codegangsta/negroni/static.go b/vendor/github.com/codegangsta/negroni/static.go new file mode 100644 index 000000000000..34be967c0526 --- /dev/null +++ b/vendor/github.com/codegangsta/negroni/static.go @@ -0,0 +1,88 @@ +package negroni + +import ( + "net/http" + "path" + "strings" +) + +// Static is a middleware handler that serves static files in the given +// directory/filesystem. If the file does not exist on the filesystem, it +// passes along to the next middleware in the chain. If you desire "fileserver" +// type behavior where it returns a 404 for unfound files, you should consider +// using http.FileServer from the Go stdlib. 
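+// Only GET and HEAD requests are served; all other methods are passed through to the next handler.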
+type Static struct { + // Dir is the directory to serve static files from + Dir http.FileSystem + // Prefix is the optional prefix used to serve the static directory content + Prefix string + // IndexFile defines which file to serve as index if it exists. + IndexFile string +} + +// NewStatic returns a new instance of Static +func NewStatic(directory http.FileSystem) *Static { + return &Static{ + Dir: directory, + Prefix: "", + IndexFile: "index.html", + } +} + +func (s *Static) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + if r.Method != "GET" && r.Method != "HEAD" { + next(rw, r) + return + } + file := r.URL.Path + // if we have a prefix, filter requests by stripping the prefix + if s.Prefix != "" { + if !strings.HasPrefix(file, s.Prefix) { + next(rw, r) + return + } + file = file[len(s.Prefix):] + if file != "" && file[0] != '/' { + next(rw, r) + return + } + } + f, err := s.Dir.Open(file) + if err != nil { + // discard the error? + next(rw, r) + return + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + next(rw, r) + return + } + + // try to serve index file + if fi.IsDir() { + // redirect if missing trailing slash + if !strings.HasSuffix(r.URL.Path, "/") { + http.Redirect(rw, r, r.URL.Path+"/", http.StatusFound) + return + } + + file = path.Join(file, s.IndexFile) + f, err = s.Dir.Open(file) + if err != nil { + next(rw, r) + return + } + defer f.Close() + + fi, err = f.Stat() + if err != nil || fi.IsDir() { + next(rw, r) + return + } + } + + http.ServeContent(rw, r, file, fi.ModTime(), f) +} diff --git a/vendor/github.com/coreos/bbolt/.gitignore b/vendor/github.com/coreos/bbolt/.gitignore new file mode 100644 index 000000000000..c7bd2b7a5b84 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/.gitignore @@ -0,0 +1,4 @@ +*.prof +*.test +*.swp +/bin/ diff --git a/vendor/github.com/coreos/bbolt/BUILD b/vendor/github.com/coreos/bbolt/BUILD new file mode 100644 index 000000000000..3a27384727e9 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/BUILD @@ -0,0 +1,102 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "bucket.go", + "cursor.go", + "db.go", + "doc.go", + "errors.go", + "freelist.go", + "node.go", + "page.go", + "tx.go", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "bolt_unix.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "bolt_unix.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "bolt_unix.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "bolt_unix.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "bolt_linux.go", + "bolt_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "bolt_unix.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "bolt_unix.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "bolt_openbsd.go", + "bolt_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "bolt_unix_solaris.go", + "boltsync_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "bolt_windows.go", + ], + "//conditions:default": [], + }) + select({ + "@io_bazel_rules_go//go/platform:386": [ + "bolt_386.go", + ], + "@io_bazel_rules_go//go/platform:amd64": [ + "bolt_amd64.go", + ], + "@io_bazel_rules_go//go/platform:arm": [ + "bolt_arm.go", + ], + "@io_bazel_rules_go//go/platform:arm64": [ + "bolt_arm64.go", + 
], + "@io_bazel_rules_go//go/platform:ppc64": [ + "bolt_ppc64.go", + ], + "@io_bazel_rules_go//go/platform:ppc64le": [ + "bolt_ppc64le.go", + ], + "@io_bazel_rules_go//go/platform:s390x": [ + "bolt_s390x.go", + ], + "//conditions:default": [], + }), + importpath = "github.com/coreos/bbolt", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/bbolt/LICENSE b/vendor/github.com/coreos/bbolt/LICENSE new file mode 100644 index 000000000000..004e77fe5d2e --- /dev/null +++ b/vendor/github.com/coreos/bbolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/coreos/bbolt/Makefile b/vendor/github.com/coreos/bbolt/Makefile new file mode 100644 index 000000000000..e035e63adcd7 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/Makefile @@ -0,0 +1,18 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt + +test: + @go test -v -cover . + @go test -v ./cmd/bolt + +.PHONY: fmt test diff --git a/vendor/github.com/coreos/bbolt/README.md b/vendor/github.com/coreos/bbolt/README.md new file mode 100644 index 000000000000..8523e3377344 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/README.md @@ -0,0 +1,852 @@ +Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) +==== + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. 
The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + +## Project Status + +Bolt is stable and the API is fixed. Full unit test coverage and randomized +black box testing are used to ensure database consistency and thread safety. +Bolt is currently in high-load production environments serving databases as +large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed +services every day. + +## Table of Contents + +- [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) +- [Resources](#resources) +- [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) +- [Caveats & Limitations](#caveats--limitations) +- [Reading the Source](#reading-the-source) +- [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get github.com/boltdb/bolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + "github.com/boltdb/bolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. + db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. 
+Each transaction has a consistent view of the data as it existed when the
+transaction started.
+
+Individual transactions and all objects created from them (e.g. buckets, keys)
+are not thread safe. To work with data in multiple goroutines you must start
+a transaction for each one or use locking to ensure only one goroutine accesses
+a transaction at a time. Creating a transaction from the `DB` is thread safe.
+
+Read-only transactions and read-write transactions should not depend on one
+another and generally shouldn't be opened simultaneously in the same goroutine.
+This can cause a deadlock as the read-write transaction needs to periodically
+re-map the data file but it cannot do so while a read-only transaction is open.
+
+
+#### Read-write transactions
+
+To start a read-write transaction, you can use the `DB.Update()` function:
+
+```go
+err := db.Update(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+Inside the closure, you have a consistent view of the database. You commit the
+transaction by returning `nil` at the end. You can also rollback the transaction
+at any point by returning an error. All database operations are allowed inside
+a read-write transaction.
+
+Always check the return error as it will report any disk failures that can cause
+your transaction to not complete. If you return an error within your closure
+it will be passed through.
+
+
+#### Read-only transactions
+
+To start a read-only transaction, you can use the `DB.View()` function:
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+You also get a consistent view of the database within this closure, however,
+no mutating operations are allowed within a read-only transaction. You can only
+retrieve buckets, retrieve values, and copy the database within a read-only
+transaction.
+
+
+#### Batch read-write transactions
+
+Each `DB.Update()` waits for disk to commit the writes. This overhead
+can be minimized by combining multiple updates with the `DB.Batch()`
+function:
+
+```go
+err := db.Batch(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+Concurrent Batch calls are opportunistically combined into larger
+transactions. Batch is only useful when there are multiple goroutines
+calling it.
+
+The trade-off is that `Batch` can call the given
+function multiple times, if parts of the transaction fail. The
+function must be idempotent and side effects must take effect only
+after a successful return from `DB.Batch()`.
+
+For example: don't display messages from inside the function, instead
+set variables in the enclosing scope:
+
+```go
+var id uint64
+err := db.Batch(func(tx *bolt.Tx) error {
+	// Find last key in bucket, decode as bigendian uint64, increment
+	// by one, encode back to []byte, and add new key.
+	...
+	id = newValue
+	return nil
+})
+if err != nil {
+	return ...
+}
+fmt.Printf("Allocated ID %d\n", id)
+```
+
+
+#### Managing transactions manually
+
+The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
+function. These helper functions will start the transaction, execute a function,
+and then safely close your transaction if an error is returned. This is the
+recommended way to use Bolt transactions.
+
+However, sometimes you may want to manually start and end your transactions.
+You can use the `DB.Begin()` function directly but **please** be sure to close
+the transaction.
+
+```go
+// Start a writable transaction.
+tx, err := db.Begin(true)
+if err != nil {
+	return err
+}
+defer tx.Rollback()
+
+// Use the transaction...
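+// (The deferred Rollback above becomes a harmless no-op once Commit succeeds;
+// the error it returns in that case is discarded by the defer.)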
+_, err = tx.CreateBucket([]byte("MyBucket"))
+if err != nil {
+	return err
+}
+
+// Commit the transaction and check for error.
+if err := tx.Commit(); err != nil {
+	return err
+}
+```
+
+The first argument to `DB.Begin()` is a boolean stating if the transaction
+should be writable.
+
+
+### Using buckets
+
+Buckets are collections of key/value pairs within the database. All keys in a
+bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
+function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	_, err := tx.CreateBucket([]byte("MyBucket"))
+	if err != nil {
+		return fmt.Errorf("create bucket: %s", err)
+	}
+	return nil
+})
+```
+
+You can also create a bucket only if it doesn't exist by using the
+`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
+function for all your top-level buckets after you open your database so you can
+guarantee that they exist for future transactions.
+
+To delete a bucket, simply call the `Tx.DeleteBucket()` function.
+
+
+### Using key/value pairs
+
+To save a key/value pair to a bucket, use the `Bucket.Put()` function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	err := b.Put([]byte("answer"), []byte("42"))
+	return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
+bucket. To retrieve this value, we can use the `Bucket.Get()` function:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	v := b.Get([]byte("answer"))
+	fmt.Printf("The answer is: %s\n", v)
+	return nil
+})
+```
+
+The `Get()` function does not return an error because its operation is
+guaranteed to work (unless there is some kind of system failure). If the key
+exists then it will return its byte slice value. If it doesn't exist then it
+will return `nil`. It's important to note that you can have a zero-length value
+set to a key which is different from the key not existing.
+
+Use the `Bucket.Delete()` function to delete a key from the bucket.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
+
+
+### Autoincrementing integer for the bucket
+By using the `NextSequence()` function, you can let Bolt determine a sequence
+which can be used as the unique identifier for your key/value pairs. See the
+example below.
+
+```go
+// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
+func (s *Store) CreateUser(u *User) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
+		// Retrieve the users bucket.
+		// This should be created when the DB is first opened.
+		b := tx.Bucket([]byte("users"))
+
+		// Generate ID for the user.
+		// This returns an error only if the Tx is closed or not writeable.
+		// That can't happen in an Update() call so I ignore the error check.
+		id, _ := b.NextSequence()
+		u.ID = int(id)
+
+		// Marshal user data into bytes.
+		buf, err := json.Marshal(u)
+		if err != nil {
+			return err
+		}
+
+		// Persist bytes to users bucket.
+		return b.Put(itob(u.ID), buf)
+	})
+}
+
+// itob returns an 8-byte big endian representation of v.
+func itob(v int) []byte {
+	b := make([]byte, 8)
+	binary.BigEndian.PutUint64(b, uint64(v))
+	return b
+}
+
+type User struct {
+	ID int
+	...
+}
+```
+
+### Iterating over keys
+
+Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
+iteration over these keys extremely fast.
To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. + + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket exists and has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. + for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + +Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. + + +#### ForEach() + +You can also use the function `ForEach()` if you know you'll be iterating over +all the keys in a bucket: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + b.ForEach(func(k, v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + return nil +}) +``` + + +### Nested buckets + +You can also store a bucket in a key to create nested buckets. The API is the +same as the bucket management API on the `DB` object: + +```go +func (*Bucket) CreateBucket(key []byte) (*Bucket, error) +func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) +func (*Bucket) DeleteBucket(key []byte) error +``` + + +### Database backups + +Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` +function to write a consistent view of the database to a writer. If you call +this from a read-only transaction, it will perform a hot backup and not block +your other database reads and writes. 
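+
+As a minimal sketch (the helper name `backupTo` and the path handling are
+illustrative, not part of the Bolt API), a snapshot can be written to a file
+from inside a read-only transaction:
+
+```go
+func backupTo(db *bolt.DB, path string) error {
+	return db.View(func(tx *bolt.Tx) error {
+		f, err := os.Create(path)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		// WriteTo streams a consistent snapshot of the entire database.
+		_, err = tx.WriteTo(f)
+		return err
+	})
+}
+```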
+
+By default, it will use a regular file handle which will utilize the operating
+system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
+documentation for information about optimizing for larger-than-RAM datasets.
+
+One common use case is to backup over HTTP so you can use tools like `cURL` to
+do database backups:
+
+```go
+func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+	err := db.View(func(tx *bolt.Tx) error {
+		w.Header().Set("Content-Type", "application/octet-stream")
+		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+		_, err := tx.WriteTo(w)
+		return err
+	})
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+	}
+}
+```
+
+Then you can backup using this command:
+
+```sh
+$ curl http://localhost/backup > my.db
+```
+
+Or you can open your browser to `http://localhost/backup` and it will download
+automatically.
+
+If you want to backup to another file you can use the `Tx.CopyFile()` helper
+function.
+
+
+### Statistics
+
+The database keeps a running count of many of the internal operations it
+performs so you can better understand what's going on. By grabbing a snapshot
+of these stats at two points in time we can see what operations were performed
+in that time range.
+
+For example, we could start a goroutine to log stats every 10 seconds:
+
+```go
+go func() {
+	// Grab the initial stats.
+	prev := db.Stats()
+
+	for {
+		// Wait for 10s.
+		time.Sleep(10 * time.Second)
+
+		// Grab the current stats and diff them.
+		stats := db.Stats()
+		diff := stats.Sub(&prev)
+
+		// Encode stats to JSON and print to STDERR.
+		json.NewEncoder(os.Stderr).Encode(diff)
+
+		// Save stats for the next loop.
+		prev = stats
+	}
+}()
+```
+
+It's also useful to pipe these stats to a service such as statsd for monitoring
+or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
+### Read-Only Mode
+
+Sometimes it is useful to create a shared, read-only Bolt database. To do this,
+set the `Options.ReadOnly` flag when opening your database. Read-only mode
+uses a shared lock to allow multiple processes to read from the database but
+it will block any processes from opening the database in read-write mode.
+
+```go
+db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+### Mobile Use (iOS/Android)
+
+Bolt is able to run on mobile devices by leveraging the binding feature of the
+[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
+contain your database logic and a reference to a `*bolt.DB` with an initializing
+constructor that takes in a filepath where the database file will be stored.
+Neither Android nor iOS require extra permissions or cleanup from using this method.
+
+```go
+func NewBoltDB(filepath string) *BoltDB {
+	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return &BoltDB{db}
+}
+
+type BoltDB struct {
+	db *bolt.DB
+	...
+}
+
+func (b *BoltDB) Path() string {
+	return b.db.Path()
+}
+
+func (b *BoltDB) Close() {
+	b.db.Close()
+}
+```
+
+Database logic should be defined as methods on this wrapper struct.
+
+To initialize this struct from the native language (both platforms now sync
+their local storage to the cloud;
+these snippets disable that functionality for the database file):
+
+#### Android
+
+```java
+String path;
+if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
+    path = getNoBackupFilesDir().getAbsolutePath();
+} else {
+    path = getFilesDir().getAbsolutePath();
+}
+Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
+```
+
+#### iOS
+
+```objc
+- (void)demo {
+    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
+                                                          NSUserDomainMask,
+                                                          YES) objectAtIndex:0];
+    GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
+    [self addSkipBackupAttributeToItemAtPath:demo.path];
+    //Some DB Logic would go here
+    [demo close];
+}
+
+- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
+{
+    NSURL* URL= [NSURL fileURLWithPath: filePathString];
+    assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
+
+    NSError *error = nil;
+    BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
+                                  forKey: NSURLIsExcludedFromBackupKey error: &error];
+    if(!success){
+        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
+    }
+    return success;
+}
+
+```
+
+## Resources
+
+For more information on getting started with Bolt, check out the following articles:
+
+* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
+
+
+## Comparison with other databases
+
+### Postgres, MySQL, & other relational databases
+
+Relational databases structure data into rows and are only accessible through
+the use of SQL. This approach provides flexibility in how you store and query
+your data but also incurs overhead in parsing and planning SQL statements. Bolt
+accesses all data by a byte slice key. This makes Bolt fast to read and write
+data by key but provides no built-in support for joining values together.
+
+Most relational databases (with the exception of SQLite) are standalone servers
+that run separately from your application. This gives your systems
+flexibility to connect multiple application servers to a single database
+server but also adds overhead in serializing and transporting data over the
+network. Bolt runs as a library included in your application so all data access
+has to go through your application's process. This brings data closer to your
+application but limits multi-process access to the data.
+
+
+### LevelDB, RocksDB
+
+LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
+they are libraries bundled into the application, however, their underlying
+structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
+random writes by using a write ahead log and multi-tiered, sorted files called
+SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
+have trade-offs.
+
+If you require a high random write throughput (>10,000 w/sec) or you need to use
+spinning disks then LevelDB could be a good choice. If your application is
+read-heavy or does a lot of range scans then Bolt could be a good choice.
+
+One other important consideration is that LevelDB does not have transactions.
+It supports batch writing of key/value pairs and it supports read snapshots
+but it will not give you the ability to do a compare-and-swap operation safely.
+Bolt supports fully serializable ACID transactions.
+
+
+### LMDB
+
+Bolt was originally a port of LMDB so it is architecturally similar. Both use
+a B+tree, have ACID semantics with fully serializable transactions, and support
+lock-free MVCC using a single writer and multiple readers.
+
+The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+several unsafe actions such as direct writes for the sake of performance. Bolt
+opts to disallow actions which can leave the database in a corrupted state. The
+only exception to this in Bolt is `DB.NoSync`.
+
+There are also a few differences in API. LMDB requires a maximum mmap size when
+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
+automatically. LMDB overloads the getter and setter functions with multiple
+flags whereas Bolt splits these specialized cases into their own functions.
+
+
+## Caveats & Limitations
+
+It's important to pick the right tool for the job and Bolt is no exception.
+Here are a few things to note when evaluating and using Bolt:
+
+* Bolt is good for read-intensive workloads. Sequential write performance is
+  also fast but random writes can be slow. You can use `DB.Batch()` or add a
+  write-ahead log to help mitigate this issue.
+
+* Bolt uses a B+tree internally so there can be a lot of random page access.
+  SSDs provide a significant performance boost over spinning disks.
+
+* Try to avoid long running read transactions. Bolt uses copy-on-write so
+  old pages cannot be reclaimed while an old transaction is using them.
+
+* Byte slices returned from Bolt are only valid during a transaction. Once the
+  transaction has been committed or rolled back then the memory they point to
+  can be reused by a new page or can be unmapped from virtual memory and you'll
+  see an `unexpected fault address` panic when accessing it.
+
+* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
+  buckets that have random inserts will cause your database to have very poor
+  page utilization.
+
+* Use larger buckets in general. Smaller buckets cause poor page utilization
+  once they become larger than the page size (typically 4KB).
+
+* Bulk loading a lot of random writes into a new bucket can be slow as the
+  page will not split until the transaction is committed. Randomly inserting
+  more than 100,000 key/value pairs into a single new bucket in a single
+  transaction is not advised.
+
+* Bolt uses a memory-mapped file so the underlying operating system handles the
+  caching of the data. Typically, the OS will cache as much of the file as it
+  can in memory and will release memory as needed to other processes. This means
+  that Bolt can show very high memory usage when working with large databases.
+  However, this is expected and the OS will release memory as needed. Bolt can
+  handle databases much larger than the available physical RAM, provided its
+  memory-map fits in the process virtual address space. It may be problematic
+  on 32-bit systems.
+
+* The data structures in the Bolt database are memory mapped so the data file
+  will be endian specific. This means that you cannot copy a Bolt file from a
+  little endian machine to a big endian machine and have it work. For most
+  users this is not a concern since most modern CPUs are little endian.
+
+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
+  and return free pages back to the disk.
+  Instead, Bolt maintains a free list of unused pages within its data file.
+  These free pages can be reused by later transactions. This works well for
+  many use cases as databases generally tend to grow. However, it's important
+  to note that deleting large chunks of data will not allow you to reclaim
+  that space on disk.
+
+  For more information on page allocation, [see this comment][page-allocation].
+
+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+## Reading the Source
+
+Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+  creating the database if it doesn't exist, obtaining an exclusive lock on the
+  file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+  value of the `writable` argument. This requires briefly obtaining the "meta"
+  lock to keep track of open transactions. Only one read-write transaction can
+  exist at a time so the "rwlock" is acquired during the life of a read-write
+  transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+  arguments, a cursor is used to traverse the B+tree to the page and position
+  where the key & value will be written. Once the position is found, the bucket
+  materializes the underlying page and the page's parent pages into memory as
+  "nodes". These nodes are where mutations occur during read-write transactions.
+  These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+  to move to the page & position of a key/value pair. During a read-only
+  transaction, the key and value data is returned as a direct reference to the
+  underlying mmap file so there's no allocation overhead. For read-write
+  transactions, this data may reference the mmap file or one of the in-memory
+  node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+  or in-memory nodes. It can seek to a specific key, move to the first or last
+  value, or it can move forward or backward. The cursor handles the movement up
+  and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+  into pages to be written to disk. Writing to disk then occurs in two phases.
+  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+  new meta page with an incremented transaction ID is written and another
+  `fsync()` occurs. This two-phase write ensures that partially written data
+  pages are ignored in the event of a crash since the meta page pointing to them
+  is never written. Partially written meta pages are invalidated because they
+  are written with a checksum.
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+
+## Other Projects Using Bolt
+
+Below is a list of public, open source projects that use Bolt:
+
+* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. +* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. +* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. +* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. +* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. +* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. +* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. +* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. +* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. 
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
+* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
+* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
+* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
+* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
+* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
+
+If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/github.com/coreos/bbolt/appveyor.yml b/vendor/github.com/coreos/bbolt/appveyor.yml
new file mode 100644
index 000000000000..6e26e941d682
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/appveyor.yml
@@ -0,0 +1,18 @@
+version: "{build}"
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\gopath\src\github.com\boltdb\bolt
+
+environment:
+  GOPATH: c:\gopath
+
+install:
+  - echo %PATH%
+  - echo %GOPATH%
+  - go version
+  - go env
+  - go get -v -t ./...
+
+build_script:
+  - go test -v ./...
diff --git a/vendor/github.com/coreos/bbolt/bolt_386.go b/vendor/github.com/coreos/bbolt/bolt_386.go
new file mode 100644
index 000000000000..e659bfb91f33
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_386.go
@@ -0,0 +1,7 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_amd64.go b/vendor/github.com/coreos/bbolt/bolt_amd64.go
new file mode 100644
index 000000000000..cca6b7eb7070
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_amd64.go
@@ -0,0 +1,7 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_arm.go
new file mode 100644
index 000000000000..e659bfb91f33
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_arm.go
@@ -0,0 +1,7 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/coreos/bbolt/bolt_arm64.go b/vendor/github.com/coreos/bbolt/bolt_arm64.go new file mode 100644 index 000000000000..6d2309352e06 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_arm64.go @@ -0,0 +1,9 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/coreos/bbolt/bolt_linux.go b/vendor/github.com/coreos/bbolt/bolt_linux.go new file mode 100644 index 000000000000..2b6766614090 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/vendor/github.com/coreos/bbolt/bolt_openbsd.go b/vendor/github.com/coreos/bbolt/bolt_openbsd.go new file mode 100644 index 000000000000..7058c3d734ef --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc.go b/vendor/github.com/coreos/bbolt/bolt_ppc.go new file mode 100644 index 000000000000..645ddc3edc2d --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64.go b/vendor/github.com/coreos/bbolt/bolt_ppc64.go new file mode 100644 index 000000000000..2dc6be02e3e3 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_ppc64.go @@ -0,0 +1,9 @@ +// +build ppc64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64le.go b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go new file mode 100644 index 000000000000..8351e129f6a3 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go @@ -0,0 +1,9 @@ +// +build ppc64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/coreos/bbolt/bolt_s390x.go b/vendor/github.com/coreos/bbolt/bolt_s390x.go new file mode 100644 index 000000000000..f4dd26bbba7c --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_s390x.go @@ -0,0 +1,9 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/coreos/bbolt/bolt_unix.go b/vendor/github.com/coreos/bbolt/bolt_unix.go new file mode 100644 index 000000000000..cad62dda1e38 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_unix.go @@ -0,0 +1,89 @@ +// +build !windows,!plan9,!solaris + +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + flag := syscall.LOCK_SH + if exclusive { + flag = syscall.LOCK_EX + } + + // Otherwise attempt to obtain an exclusive lock. + err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. +func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go new file mode 100644 index 000000000000..307bf2b3ee97 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go @@ -0,0 +1,90 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. 
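+// On Solaris this uses POSIX fcntl record locks (F_SETLK) rather than flock(2).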
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Whence = 0 + lock.Pid = 0 + if exclusive { + lock.Type = syscall.F_WRLCK + } else { + lock.Type = syscall.F_RDLCK + } + err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/github.com/coreos/bbolt/bolt_windows.go b/vendor/github.com/coreos/bbolt/bolt_windows.go new file mode 100644 index 000000000000..d538e6afd77a --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_windows.go @@ -0,0 +1,144 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + lockExt = ".lock" + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. 
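+// Windows has no fdatasync equivalent, so this falls back to a full file Sync.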
+func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). + f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path+lockExt) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. + sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/vendor/github.com/coreos/bbolt/boltsync_unix.go b/vendor/github.com/coreos/bbolt/boltsync_unix.go new file mode 100644 index 000000000000..f50442523c38 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bolt + +// fdatasync flushes written data to a file descriptor. 
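+// Platforms excluded by the build tags above lack a native fdatasync, so this
+// likewise falls back to a full File.Sync.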
+func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/vendor/github.com/coreos/bbolt/bucket.go b/vendor/github.com/coreos/bbolt/bucket.go new file mode 100644 index 000000000000..d2f8c524e42f --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bucket.go @@ -0,0 +1,748 @@ +package bolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. 
+	if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
+		return nil
+	}
+
+	// Otherwise create a bucket and cache it.
+	var child = b.openBucket(v)
+	if b.buckets != nil {
+		b.buckets[string(name)] = child
+	}
+
+	return child
+}
+
+// openBucket is a helper method that re-interprets a sub-bucket value
+// from a parent into a Bucket.
+func (b *Bucket) openBucket(value []byte) *Bucket {
+	var child = newBucket(b.tx)
+
+	// If this is a writable transaction then we need to copy the bucket entry.
+	// Read-only transactions can point directly at the mmap entry.
+	if b.tx.writable {
+		child.bucket = &bucket{}
+		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
+	} else {
+		child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
+	}
+
+	// Save a reference to the inline page if the bucket is inline.
+	if child.root == 0 {
+		child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
+	}
+
+	return &child
+}
+
+// CreateBucket creates a new bucket at the given key and returns the new bucket.
+// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
+	if b.tx.db == nil {
+		return nil, ErrTxClosed
+	} else if !b.tx.writable {
+		return nil, ErrTxNotWritable
+	} else if len(key) == 0 {
+		return nil, ErrBucketNameRequired
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if there is an existing key.
+	if bytes.Equal(key, k) {
+		if (flags & bucketLeafFlag) != 0 {
+			return nil, ErrBucketExists
+		} else {
+			return nil, ErrIncompatibleValue
+		}
+	}
+
+	// Create empty, inline bucket.
+	var bucket = Bucket{
+		bucket:      &bucket{},
+		rootNode:    &node{isLeaf: true},
+		FillPercent: DefaultFillPercent,
+	}
+	var value = bucket.write()
+
+	// Insert into node.
+	key = cloneBytes(key)
+	c.node().put(key, key, value, 0, bucketLeafFlag)
+
+	// Since subbuckets are not allowed on inline buckets, we need to
+	// dereference the inline page, if it exists. This will cause the bucket
+	// to be treated as a regular, non-inline bucket for the rest of the tx.
+	b.page = nil
+
+	return b.Bucket(key), nil
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+	child, err := b.CreateBucket(key)
+	if err == ErrBucketExists {
+		return b.Bucket(key), nil
+	} else if err != nil {
+		return nil, err
+	}
+	return child, nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if the bucket doesn't exist or the key is not a bucket.
+	if !bytes.Equal(key, k) {
+		return ErrBucketNotFound
+	} else if (flags & bucketLeafFlag) == 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Recursively delete all child buckets.
+	child := b.Bucket(key)
+	err := child.ForEach(func(k, v []byte) error {
+		if v == nil {
+			if err := child.DeleteBucket(k); err != nil {
+				return fmt.Errorf("delete bucket: %s", err)
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Remove cached copy.
+	delete(b.buckets, string(key))
+
+	// Release all bucket pages to freelist.
+	child.nodes = nil
+	child.rootNode = nil
+	child.free()
+
+	// Delete the node if we have a matching key.
+	c.node().del(key)
+
+	return nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (b *Bucket) Get(key []byte) []byte {
+	k, v, flags := b.Cursor().seek(key)
+
+	// Return nil if this is a bucket.
+	if (flags & bucketLeafFlag) != 0 {
+		return nil
+	}
+
+	// If our target node isn't the same key as what's passed in then return nil.
+	if !bytes.Equal(key, k) {
+		return nil
+	}
+	return v
+}
+
+// Put sets the value for a key in the bucket.
+// If the key exists then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
+// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+func (b *Bucket) Put(key []byte, value []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	} else if len(key) == 0 {
+		return ErrKeyRequired
+	} else if len(key) > MaxKeySize {
+		return ErrKeyTooLarge
+	} else if int64(len(value)) > MaxValueSize {
+		return ErrValueTooLarge
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if there is an existing key with a bucket value.
+	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Insert into node.
+	key = cloneBytes(key)
+	c.node().put(key, key, value, 0, 0)
+
+	return nil
+}
+
+// Delete removes a key from the bucket.
+// If the key does not exist then nothing is done and a nil error is returned.
+// Returns an error if the bucket was created from a read-only transaction.
+func (b *Bucket) Delete(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	_, _, flags := c.seek(key)
+
+	// Return an error if there is an existing bucket value.
+	if (flags & bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Delete the node if we have a matching key.
+	c.node().del(key)
+
+	return nil
+}
+
+// NextSequence returns an autoincrementing integer for the bucket.
+func (b *Bucket) NextSequence() (uint64, error) {
+	if b.tx.db == nil {
+		return 0, ErrTxClosed
+	} else if !b.Writable() {
+		return 0, ErrTxNotWritable
+	}
+
+	// Materialize the root node if it hasn't been already so that the
+	// bucket will be saved during commit.
+	if b.rootNode == nil {
+		_ = b.node(b.root, nil)
+	}
+
+	// Increment and return the sequence.
+	b.bucket.sequence++
+	return b.bucket.sequence, nil
+}
+
+// ForEach executes a function for each key/value pair in a bucket.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller. The provided function must not modify
+// the bucket; this will result in undefined behavior.
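Taken together, Put/Get/Delete and the ForEach contract just described support the usual CRUD loop. A short sketch, assuming the same bolt import alias, an open *bolt.DB named db, a fmt import, and a hypothetical "users" bucket:

```go
func dumpUsers(db *bolt.DB) error {
	// Seed a bucket inside a read-write transaction.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("users"))
		if err != nil {
			return err
		}
		return b.Put([]byte("alice"), []byte("admin"))
	}); err != nil {
		return err
	}

	// Iterate it read-only; v is nil for any nested bucket.
	return db.View(func(tx *bolt.Tx) error {
		return tx.Bucket([]byte("users")).ForEach(func(k, v []byte) error {
			fmt.Printf("%s=%s\n", k, v)
			return nil
		})
	})
}
```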
+func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	}
+	c := b.Cursor()
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		if err := fn(k, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Stats returns stats on a bucket.
+func (b *Bucket) Stats() BucketStats {
+	var s, subStats BucketStats
+	pageSize := b.tx.db.pageSize
+	s.BucketN += 1
+	if b.root == 0 {
+		s.InlineBucketN += 1
+	}
+	b.forEachPage(func(p *page, depth int) {
+		if (p.flags & leafPageFlag) != 0 {
+			s.KeyN += int(p.count)
+
+			// used totals the used bytes for the page
+			used := pageHeaderSize
+
+			if p.count != 0 {
+				// If page has any elements, add all element headers.
+				used += leafPageElementSize * int(p.count-1)
+
+				// Add all element key, value sizes.
+				// The computation takes advantage of the fact that the position
+				// of the last element's key/value equals the total of the sizes
+				// of all previous elements' keys and values.
+				// It also includes the last element's header.
+				lastElement := p.leafPageElement(p.count - 1)
+				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
+			}
+
+			if b.root == 0 {
+				// For inlined bucket just update the inline stats
+				s.InlineBucketInuse += used
+			} else {
+				// For non-inlined bucket update all the leaf stats
+				s.LeafPageN++
+				s.LeafInuse += used
+				s.LeafOverflowN += int(p.overflow)
+
+				// Collect stats from sub-buckets.
+				// Do that by iterating over all element headers
+				// looking for the ones with the bucketLeafFlag.
+				for i := uint16(0); i < p.count; i++ {
+					e := p.leafPageElement(i)
+					if (e.flags & bucketLeafFlag) != 0 {
+						// For any bucket element, open the element value
+						// and recursively call Stats on the contained bucket.
+						subStats.Add(b.openBucket(e.value()).Stats())
+					}
+				}
+			}
+		} else if (p.flags & branchPageFlag) != 0 {
+			s.BranchPageN++
+			lastElement := p.branchPageElement(p.count - 1)
+
+			// used totals the used bytes for the page
+			// Add header and all element headers.
+			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
+
+			// Add size of all keys and values.
+			// Again, use the fact that the last element's position equals the
+			// total of the key, value sizes of all previous elements.
+			used += int(lastElement.pos + lastElement.ksize)
+			s.BranchInuse += used
+			s.BranchOverflowN += int(p.overflow)
+		}
+
+		// Keep track of maximum page depth.
+		if depth+1 > s.Depth {
+			s.Depth = (depth + 1)
+		}
+	})
+
+	// Alloc stats can be computed from page counts and pageSize.
+	s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
+	s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
+
+	// Add the max depth of sub-buckets to get total nested depth.
+	s.Depth += subStats.Depth
+	// Add the stats for all sub-buckets
+	s.Add(subStats)
+	return s
+}
+
+// forEachPage iterates over every page in a bucket, including inline pages.
+func (b *Bucket) forEachPage(fn func(*page, int)) {
+	// If we have an inline page then just use that.
+	if b.page != nil {
+		fn(b.page, 0)
+		return
+	}
+
+	// Otherwise traverse the page hierarchy.
+	b.tx.forEachPage(b.root, 0, fn)
+}
+
+// forEachPageNode iterates over every page (or node) in a bucket.
+// This also includes inline pages.
+func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
+	// If we have an inline page or root node then just use that.
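Stats aggregates the page accounting above across the bucket and its sub-buckets. A hedged sketch of reading a few of the counters, under the same assumed db handle, bucket name, and fmt/log imports:

```go
if err := db.View(func(tx *bolt.Tx) error {
	s := tx.Bucket([]byte("users")).Stats()
	fmt.Printf("keys=%d depth=%d leaf inuse/alloc=%d/%d bytes\n",
		s.KeyN, s.Depth, s.LeafInuse, s.LeafAlloc)
	return nil
}); err != nil {
	log.Println(err)
}
```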
+ if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. 
+ var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. +func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. + if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. 
+ BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. +func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/vendor/github.com/coreos/bbolt/cursor.go b/vendor/github.com/coreos/bbolt/cursor.go new file mode 100644 index 000000000000..1be9f35e3ef8 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/cursor.go @@ -0,0 +1,400 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. 
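First/Next, together with Seek, give ordered and prefix iteration over a bucket. A sketch using the assumed db handle and "users" bucket; bytes, fmt, and log imports are implied:

```go
if err := db.View(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("users")).Cursor()

	// Ordered scan over every key in the bucket.
	for k, v := c.First(); k != nil; k, v = c.Next() {
		fmt.Printf("%s=%s\n", k, v)
	}

	// Prefix scan: Seek positions the cursor on the first key >= prefix.
	prefix := []byte("al")
	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
		fmt.Printf("%s=%s\n", k, v)
	}
	return nil
}); err != nil {
	log.Println(err)
}
```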
+func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.index >= ref.count() { + return nil, nil, 0 + } + + // If this is a bucket then return a nil value. 
+ return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. 
+ inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. + var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/vendor/github.com/coreos/bbolt/db.go b/vendor/github.com/coreos/bbolt/db.go new file mode 100644 index 000000000000..1223493ca7be --- /dev/null +++ b/vendor/github.com/coreos/bbolt/db.go @@ -0,0 +1,1036 @@ +package bolt + +import ( + "errors" + "fmt" + "hash/fnv" + "log" + "os" + "runtime" + "runtime/debug" + "strings" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. 
+const magic uint32 = 0xED0CDAED + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// default page size for db is set to the OS page size. +var defaultPageSize = os.Getpagesize() + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + path string + file *os.File + lockfile *os.File // windows only + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + freelist *freelist + stats Stats + + pagePool sync.Pool + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. 
+ + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + var db = &DB{opened: true} + + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + // Initialize the database if it doesn't exist. + if info, err := db.file.Stat(); err != nil { + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + if _, err := db.file.ReadAt(buf[:], 0); err == nil { + m := db.pageInBuffer(buf[:], 0).meta() + if err := m.validate(); err != nil { + // If we can't read the page size, we can assume it's the same + // as the OS -- since that's how the page size was chosen in the + // first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + db.pageSize = os.Getpagesize() + } else { + db.pageSize = int(m.pageSize) + } + } + } + + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + + // Memory map the data file. + if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + // Read in the freelist. 
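Open ties the pieces above together: file creation, advisory locking, page-size detection, and the initial mmap. A sketch of opening with non-default Options (the path, timeout, and mmap size are illustrative; ErrTimeout and the Options fields are defined later in this file and in errors.go, and time/log imports are implied):

```go
db, err := bolt.Open("/tmp/app.db", 0600, &bolt.Options{
	// Fail with ErrTimeout instead of blocking forever on the flock.
	Timeout: 500 * time.Millisecond,
	// Pre-size the mmap so long read transactions are less likely to
	// force a blocking remap (see DB.Begin).
	InitialMmapSize: 1 << 30,
})
if err == bolt.ErrTimeout {
	log.Fatal("database is locked by another process")
} else if err != nil {
	log.Fatal(err)
}
defer db.Close()
```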
+	db.freelist = newFreelist()
+	db.freelist.read(db.page(db.meta().freelist))
+
+	// Mark the database as opened and return.
+	return db, nil
+}
+
+// mmap opens the underlying memory-mapped file and initializes the meta references.
+// minsz is the minimum size that the new mmap can be.
+func (db *DB) mmap(minsz int) error {
+	db.mmaplock.Lock()
+	defer db.mmaplock.Unlock()
+
+	info, err := db.file.Stat()
+	if err != nil {
+		return fmt.Errorf("mmap stat error: %s", err)
+	} else if int(info.Size()) < db.pageSize*2 {
+		return fmt.Errorf("file size too small")
+	}
+
+	// Ensure the size is at least the minimum size.
+	var size = int(info.Size())
+	if size < minsz {
+		size = minsz
+	}
+	size, err = db.mmapSize(size)
+	if err != nil {
+		return err
+	}
+
+	// Dereference all mmap references before unmapping.
+	if db.rwtx != nil {
+		db.rwtx.root.dereference()
+	}
+
+	// Unmap existing data before continuing.
+	if err := db.munmap(); err != nil {
+		return err
+	}
+
+	// Memory-map the data file as a byte slice.
+	if err := mmap(db, size); err != nil {
+		return err
+	}
+
+	// Save references to the meta pages.
+	db.meta0 = db.page(0).meta()
+	db.meta1 = db.page(1).meta()
+
+	// Validate the meta pages. We only return an error if both meta pages fail
+	// validation, since meta0 failing validation means that it wasn't saved
+	// properly -- but we can recover using meta1. And vice-versa.
+	err0 := db.meta0.validate()
+	err1 := db.meta1.validate()
+	if err0 != nil && err1 != nil {
+		return err0
+	}
+
+	return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+	if err := munmap(db); err != nil {
+		return fmt.Errorf("unmap error: " + err.Error())
+	}
+	return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 32KB until 1GB.
+	for i := uint(15); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
+	}
+
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz += int64(maxMmapStep) - remainder
+	}
+
+	// Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we're incrementing in whole 1GB steps.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
+	}
+
+	// If we've exceeded the max size then only grow up to the max size.
+	if sz > maxMapSize {
+		sz = maxMapSize
+	}
+
+	return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+	// Set the page size to the OS page size.
+	db.pageSize = os.Getpagesize()
+
+	// Create two meta pages on a buffer.
+	buf := make([]byte, db.pageSize*4)
+	for i := 0; i < 2; i++ {
+		p := db.pageInBuffer(buf[:], pgid(i))
+		p.id = pgid(i)
+		p.flags = metaPageFlag
+
+		// Initialize the meta page.
+		m := p.meta()
+		m.magic = magic
+		m.version = version
+		m.pageSize = uint32(db.pageSize)
+		m.freelist = 2
+		m.root = bucket{root: 3}
+		m.pgid = 4
+		m.txid = txid(i)
+		m.checksum = m.sum64()
+	}
+
+	// Write an empty freelist at page 3.
+	p := db.pageInBuffer(buf[:], pgid(2))
+	p.id = pgid(2)
+	p.flags = freelistPageFlag
+	p.count = 0
+
+	// Write an empty leaf page at page 4.
+ p = db.pageInBuffer(buf[:], pgid(3)) + p.id = pgid(3) + p.flags = leafPageFlag + p.count = 0 + + // Write the buffer to our data file. + if _, err := db.ops.writeAt(buf, 0); err != nil { + return err + } + if err := fdatasync(db); err != nil { + return err + } + + return nil +} + +// Close releases all database resources. +// All transactions must be closed before closing the database. +func (db *DB) Close() error { + db.rwlock.Lock() + defer db.rwlock.Unlock() + + db.metalock.Lock() + defer db.metalock.Unlock() + + db.mmaplock.RLock() + defer db.mmaplock.RUnlock() + + return db.close() +} + +func (db *DB) close() error { + if !db.opened { + return nil + } + + db.opened = false + + db.freelist = nil + + // Clear ops. + db.ops.writeAt = nil + + // Close the mmap. + if err := db.munmap(); err != nil { + return err + } + + // Close file handles. + if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + if err := funlock(db); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + db.path = "" + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be dependent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. 
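Begin is the low-level entry point that the managed Update/View wrappers further down are built on. A manual-transaction sketch under the same assumptions as the earlier examples:

```go
func putManually(db *bolt.DB, k, v []byte) error {
	tx, err := db.Begin(true) // read-write transaction
	if err != nil {
		return err
	}
	// Safe after a successful Commit too: Rollback then just
	// returns ErrTxClosed, which the defer discards.
	defer tx.Rollback()

	b, err := tx.CreateBucketIfNotExists([]byte("users"))
	if err != nil {
		return err
	}
	if err := b.Put(k, v); err != nil {
		return err
	}
	return tx.Commit()
}
```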
+ // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + + // Free any pages associated with closed read-only transactions. + var minid txid = 0xFFFFFFFFFFFFFFFF + for _, t := range db.txs { + if t.meta.txid < minid { + minid = t.meta.txid + } + } + if minid > 0 { + db.freelist.release(minid - 1) + } + + return t, nil +} + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + db.txs = append(db.txs[:i], db.txs[i+1:]...) + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + if err := t.Rollback(); err != nil { + return err + } + + return nil +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. 
+// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. +// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. 
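Because the batch may rerun fn, Batch pays off only when many concurrent goroutines issue small, idempotent writes, as the comments above stress. A sketch that assumes a pre-created "jobs" bucket, the usual db handle, and sync/fmt/log imports:

```go
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
	i := i // capture loop variable
	wg.Add(1)
	go func() {
		defer wg.Done()
		err := db.Batch(func(tx *bolt.Tx) error {
			// Idempotent: rewriting the same key/value on a retry is harmless.
			key := []byte(fmt.Sprintf("job-%d", i))
			return tx.Bucket([]byte("jobs")).Put(key, []byte("done"))
		})
		if err != nil {
			log.Println("batch:", err)
		}
	}()
}
wg.Wait()
```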
+func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. +func (db *DB) meta() *meta { + // We have to return the meta with the highest txid which doesn't fail + // validation. Otherwise, we can cause errors when in fact the database is + // in a consistent state. metaA is the one with the higher txid. + metaA := db.meta0 + metaB := db.meta1 + if db.meta1.txid > db.meta0.txid { + metaA = db.meta1 + metaB = db.meta0 + } + + // Use higher meta page if valid. Otherwise fallback to previous, if valid. + if err := metaA.validate(); err == nil { + return metaA + } else if err := metaB.validate(); err == nil { + return metaB + } + + // This should never be reached, because both meta1 and meta0 were validated + // on mmap() and we do fsync() on every write. + panic("bolt.DB.meta(): invalid meta pages") +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(count int) (*page, error) { + // Allocate a temporary buffer for the page. + var buf []byte + if count == 1 { + buf = db.pagePool.Get().([]byte) + } else { + buf = make([]byte, count*db.pageSize) + } + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. + if p.id = db.freelist.allocate(count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz < db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if runtime.GOOS != "windows" { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + db.filesz = sz + return nil +} + +func (db *DB) IsReadOnly() bool { + return db.readOnly +} + +// Options represents the options that can be set when opening a database. +type Options struct { + // Timeout is the amount of time to wait to obtain a file lock. + // When set to zero it will wait indefinitely. 
This option is only
+	// available on Darwin and Linux.
+	Timeout time.Duration
+
+	// Sets the DB.NoGrowSync flag before memory mapping the file.
+	NoGrowSync bool
+
+	// Open database in read-only mode. Uses flock(..., LOCK_SH|LOCK_NB) to
+	// grab a shared lock (UNIX).
+	ReadOnly bool
+
+	// Sets the DB.MmapFlags flag before memory mapping the file.
+	MmapFlags int
+
+	// InitialMmapSize is the initial mmap size of the database
+	// in bytes. Read transactions won't block the write transaction
+	// if InitialMmapSize is large enough to hold the database mmap
+	// size. (See DB.Begin for more information)
+	//
+	// If <=0, the initial map size is 0.
+	// If InitialMmapSize is smaller than the previous database size,
+	// it has no effect.
+	InitialMmapSize int
+}
+
+// DefaultOptions represents the options used if nil options are passed into Open().
+// No timeout is used which will cause Bolt to wait indefinitely for a lock.
+var DefaultOptions = &Options{
+	Timeout:    0,
+	NoGrowSync: false,
+}
+
+// Stats represents statistics about the database.
+type Stats struct {
+	// Freelist stats
+	FreePageN     int // total number of free pages on the freelist
+	PendingPageN  int // total number of pending pages on the freelist
+	FreeAlloc     int // total bytes allocated in free pages
+	FreelistInuse int // total bytes used by the freelist
+
+	// Transaction stats
+	TxN     int // total number of started read transactions
+	OpenTxN int // number of currently open read transactions
+
+	TxStats TxStats // global, ongoing stats.
+}
+
+// Sub calculates and returns the difference between two sets of database stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
+func (s *Stats) Sub(other *Stats) Stats {
+	if other == nil {
+		return *s
+	}
+	var diff Stats
+	diff.FreePageN = s.FreePageN
+	diff.PendingPageN = s.PendingPageN
+	diff.FreeAlloc = s.FreeAlloc
+	diff.FreelistInuse = s.FreelistInuse
+	diff.TxN = s.TxN - other.TxN
+	diff.TxStats = s.TxStats.Sub(&other.TxStats)
+	return diff
+}
+
+func (s *Stats) add(other *Stats) {
+	s.TxStats.add(&other.TxStats)
+}
+
+type Info struct {
+	Data     uintptr
+	PageSize int
+}
+
+type meta struct {
+	magic    uint32
+	version  uint32
+	pageSize uint32
+	flags    uint32
+	root     bucket
+	freelist pgid
+	pgid     pgid
+	txid     txid
+	checksum uint64
+}
+
+// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
+func (m *meta) validate() error {
+	if m.magic != magic {
+		return ErrInvalid
+	} else if m.version != version {
+		return ErrVersionMismatch
+	} else if m.checksum != 0 && m.checksum != m.sum64() {
+		return ErrChecksum
+	}
+	return nil
+}
+
+// copy copies one meta object to another.
+func (m *meta) copy(dest *meta) {
+	*dest = *m
+}
+
+// write writes the meta onto a page.
+func (m *meta) write(p *page) {
+	if m.root.root >= m.pgid {
+		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
+	} else if m.freelist >= m.pgid {
+		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
+	}
+
+	// Page id is either going to be 0 or 1 which we can determine by the transaction ID.
+	p.id = pgid(m.txid % 2)
+	p.flags |= metaPageFlag
+
+	// Calculate the checksum.
+	m.checksum = m.sum64()
+
+	m.copy(p.meta())
+}
+
+// sum64 generates the checksum for the meta.
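Sub supports exactly the interval measurement its comment describes: snapshot Stats twice and diff them. A sketch, with the interval and output purely illustrative; note that Sub takes the earlier snapshot as its argument:

```go
prev := db.Stats()
time.Sleep(10 * time.Second)
cur := db.Stats()
delta := cur.Sub(&prev) // counters for the 10s window
fmt.Printf("read txs started in the window: %d\n", delta.TxN)
```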
+func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } + +func printstack() { + stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") + fmt.Fprintln(os.Stderr, stack) +} diff --git a/vendor/github.com/coreos/bbolt/doc.go b/vendor/github.com/coreos/bbolt/doc.go new file mode 100644 index 000000000000..cc937845dba0 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/doc.go @@ -0,0 +1,44 @@ +/* +Package bolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. + + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. + +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + + +Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. + + +*/ +package bolt diff --git a/vendor/github.com/coreos/bbolt/errors.go b/vendor/github.com/coreos/bbolt/errors.go new file mode 100644 index 000000000000..a3620a3ebb29 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/errors.go @@ -0,0 +1,71 @@ +package bolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when both meta pages on a database are invalid. 
+ // This typically occurs when a file is not a bolt database.
+ ErrInvalid = errors.New("invalid database")
+
+ // ErrVersionMismatch is returned when the data file was created with a
+ // different version of Bolt.
+ ErrVersionMismatch = errors.New("version mismatch")
+
+ // ErrChecksum is returned when either meta page checksum does not match.
+ ErrChecksum = errors.New("checksum error")
+
+ // ErrTimeout is returned when a database cannot obtain an exclusive lock
+ // on the data file after the timeout passed to Open().
+ ErrTimeout = errors.New("timeout")
+)
+
+// These errors can occur when beginning or committing a Tx.
+var (
+ // ErrTxNotWritable is returned when performing a write operation on a
+ // read-only transaction.
+ ErrTxNotWritable = errors.New("tx not writable")
+
+ // ErrTxClosed is returned when committing or rolling back a transaction
+ // that has already been committed or rolled back.
+ ErrTxClosed = errors.New("tx closed")
+
+ // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
+ // read-only database.
+ ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+)
+
+// These errors can occur when putting or deleting a value or a bucket.
+var (
+ // ErrBucketNotFound is returned when trying to access a bucket that has
+ // not been created yet.
+ ErrBucketNotFound = errors.New("bucket not found")
+
+ // ErrBucketExists is returned when creating a bucket that already exists.
+ ErrBucketExists = errors.New("bucket already exists")
+
+ // ErrBucketNameRequired is returned when creating a bucket with a blank name.
+ ErrBucketNameRequired = errors.New("bucket name required")
+
+ // ErrKeyRequired is returned when inserting a zero-length key.
+ ErrKeyRequired = errors.New("key required")
+
+ // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
+ ErrKeyTooLarge = errors.New("key too large")
+
+ // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+ ErrValueTooLarge = errors.New("value too large")
+
+ // ErrIncompatibleValue is returned when trying to create or delete a bucket
+ // on an existing non-bucket key or when trying to create or delete a
+ // non-bucket key on an existing bucket key.
+ ErrIncompatibleValue = errors.New("incompatible value")
+)
diff --git a/vendor/github.com/coreos/bbolt/freelist.go b/vendor/github.com/coreos/bbolt/freelist.go
new file mode 100644
index 000000000000..1b7ba91b2a51
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/freelist.go
@@ -0,0 +1,248 @@
+package bolt
+
+import (
+ "fmt"
+ "sort"
+ "unsafe"
+)
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+ ids []pgid // all free and available free page ids.
+ pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
+ cache map[pgid]bool // fast lookup of all free and pending page ids.
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist() *freelist {
+ return &freelist{
+ pending: make(map[txid][]pgid),
+ cache: make(map[pgid]bool),
+ }
+}
+
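The pending map is the MVCC half of the freelist: pages freed by transaction N cannot be reused while a reader opened at txid <= N might still dereference them, so they sit in pending[N] until release() is called once the oldest blocking reader finishes. A self-contained sketch of that hand-off (miniFreelist and its methods are illustrative names, not the vendored types):

package main

import (
	"fmt"
	"sort"
)

type pgid uint64
type txid uint64

type miniFreelist struct {
	ids     []pgid          // allocatable now
	pending map[txid][]pgid // freed, but possibly still visible to readers
}

// free records pages released by transaction tx as pending.
func (f *miniFreelist) free(tx txid, pages ...pgid) {
	f.pending[tx] = append(f.pending[tx], pages...)
}

// release folds every pending list at or below minTx into the allocatable set.
func (f *miniFreelist) release(minTx txid) {
	for tx, pages := range f.pending {
		if tx <= minTx {
			f.ids = append(f.ids, pages...)
			delete(f.pending, tx)
		}
	}
	sort.Slice(f.ids, func(i, j int) bool { return f.ids[i] < f.ids[j] })
}

func main() {
	f := &miniFreelist{pending: make(map[txid][]pgid)}
	f.free(5, 12, 13)  // tx 5 frees pages 12 and 13
	f.release(4)       // a reader at txid 4 is still open: nothing moves
	fmt.Println(f.ids) // []
	f.release(5)       // last blocking reader is done
	fmt.Println(f.ids) // [12 13]
}

+// size returns the size of the page after serialization.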
+func (f *freelist) size() int { + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) +} + +// count returns count of pages on the freelist +func (f *freelist) count() int { + return f.free_count() + f.pending_count() +} + +// free_count returns count of free pages +func (f *freelist) free_count() int { + return len(f.ids) +} + +// pending_count returns count of pending pages +func (f *freelist) pending_count() int { + var count int + for _, list := range f.pending { + count += len(list) + } + return count +} + +// all returns a list of all free ids and all pending ids in one sorted list. +func (f *freelist) all() []pgid { + m := make(pgids, 0) + + for _, list := range f.pending { + m = append(m, list...) + } + + sort.Sort(m) + return pgids(f.ids).merge(m) +} + +// allocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) allocate(n int) pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, initial+i) + } + + return initial + } + + previd = id + } + return 0 +} + +// free releases a page and its overflow for a given transaction id. +// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + var ids = f.pending[txid] + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + + // Add to the freelist and cache. + ids = append(ids, id) + f.cache[id] = true + } + f.pending[txid] = ids +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, ids := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + for _, id := range f.pending[txid] { + delete(f.cache, id) + } + + // Remove pages from pending list. + delete(f.pending, txid) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. 
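allocate above hands back the first id of a run of n consecutive free pages. Stripped of the slice surgery and cache bookkeeping, the scan reduces to the following runnable sketch (findRun is a hypothetical helper, shown only for clarity):

package main

import "fmt"

// findRun returns the first id of a run of n consecutive ids in a sorted
// list, or 0 if no such run exists: the core loop of freelist.allocate.
func findRun(ids []uint64, n int) uint64 {
	var initial, prev uint64
	for _, id := range ids {
		if prev == 0 || id-prev != 1 {
			initial = id // the run was broken; restart it at this id
		}
		if id-initial+1 == uint64(n) {
			return initial
		}
		prev = id
	}
	return 0
}

func main() {
	ids := []uint64{3, 4, 5, 9, 10}
	fmt.Println(findRun(ids, 3)) // 3 (pages 3,4,5 form a run)
	fmt.Println(findRun(ids, 4)) // 0 (no run of four exists)
}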
+func (f *freelist) read(p *page) { + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + idx, count := 0, int(p.count) + if count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + + // Copy the list of page ids from the freelist. + if count == 0 { + f.ids = nil + } else { + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + f.ids = make([]pgid, len(ids)) + copy(f.ids, ids) + + // Make sure they're sorted. + sort.Sort(pgids(f.ids)) + } + + // Rebuild the page cache. + f.reindex() +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. + ids := f.all() + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + if len(ids) == 0 { + p.count = uint16(len(ids)) + } else if len(ids) < 0xFFFF { + p.count = uint16(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool) + for _, id := range f.ids { + f.cache[id] = true + } + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + f.cache[pendingID] = true + } + } +} diff --git a/vendor/github.com/coreos/bbolt/node.go b/vendor/github.com/coreos/bbolt/node.go new file mode 100644 index 000000000000..159318b229cb --- /dev/null +++ b/vendor/github.com/coreos/bbolt/node.go @@ -0,0 +1,604 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. 
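write and read above share a header convention: page.count is a uint16, so once the list reaches 0xFFFF entries the count becomes a sentinel and the true length moves into the first pgid slot. A hypothetical round-trip of just that convention (not the exact vendored byte layout):

package main

import "fmt"

// encode returns the uint16 header count plus the payload slots; a count of
// 0xFFFF means "real length is stored in the first slot".
func encode(ids []uint64) (uint16, []uint64) {
	if len(ids) < 0xFFFF {
		return uint16(len(ids)), ids
	}
	return 0xFFFF, append([]uint64{uint64(len(ids))}, ids...)
}

// decode reverses encode.
func decode(count uint16, slots []uint64) []uint64 {
	if count == 0xFFFF {
		n := int(slots[0])
		return slots[1 : n+1]
	}
	return slots[:count]
}

func main() {
	big := make([]uint64, 70000) // more than 0xFFFF (65535) entries
	count, slots := encode(big)
	fmt.Println(count, len(slots), len(decode(count, slots))) // 65535 70001 70000
}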
+func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. +func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. 
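put, del, and childIndex above all lean on one idiom: sort.Search over the key-sorted inode slice, with bytes.Compare(...) != -1 as the "not less than" predicate. The same pattern on a bare [][]byte, runnable on its own:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// insertSorted inserts k into a sorted key slice, keeping it sorted and
// deduplicated: the same shape as node.put, minus the inode payload.
func insertSorted(keys [][]byte, k []byte) [][]byte {
	i := sort.Search(len(keys), func(j int) bool { return bytes.Compare(keys[j], k) != -1 })
	if i < len(keys) && bytes.Equal(keys[i], k) {
		return keys // exact match: node.put would overwrite in place here
	}
	keys = append(keys, nil)   // grow by one element
	copy(keys[i+1:], keys[i:]) // shift the tail right
	keys[i] = k
	return keys
}

func main() {
	keys := [][]byte{[]byte("apple"), []byte("cherry")}
	keys = insertSorted(keys, []byte("banana"))
	for _, k := range keys {
		fmt.Println(string(k)) // apple, banana, cherry
	}
}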
+func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. + if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. + // + // See: https://github.com/boltdb/bolt/pull/335 + klen, vlen := len(item.key), len(item.value) + if len(b) < klen+vlen { + b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] + } + + // Write data for the element to the end of the page. + copy(b[0:], item.key) + b = b[klen:] + copy(b[0:], item.value) + b = b[vlen:] + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize int) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. + node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize int) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. 
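+ // (Worked example: with a typical 4096-byte page and the default
+ // FillPercent of 0.5, the clamp below leaves fillPercent at 0.5 and the
+ // threshold at int(4096 * 0.5) = 2048 bytes, so splitIndex packs elements
+ // into the first node until one more would push it past 2048, leaving
+ // headroom in each page for later inserts.)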
+ var fillPercent = n.bucket.FillPercent
+ if fillPercent < minFillPercent {
+ fillPercent = minFillPercent
+ } else if fillPercent > maxFillPercent {
+ fillPercent = maxFillPercent
+ }
+ threshold := int(float64(pageSize) * fillPercent)
+
+ // Determine split position and sizes of the two pages.
+ splitIndex, _ := n.splitIndex(threshold)
+
+ // Split node into two separate nodes.
+ // If there's no parent then we'll need to create one.
+ if n.parent == nil {
+ n.parent = &node{bucket: n.bucket, children: []*node{n}}
+ }
+
+ // Create a new node and add it to the parent.
+ next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
+ n.parent.children = append(n.parent.children, next)
+
+ // Split inodes across two nodes.
+ next.inodes = n.inodes[splitIndex:]
+ n.inodes = n.inodes[:splitIndex]
+
+ // Update the statistics.
+ n.bucket.tx.stats.Split++
+
+ return n, next
+}
+
+// splitIndex finds the position where a page will fill a given threshold.
+// It returns the index as well as the size of the first page.
+// This should only be called from split().
+func (n *node) splitIndex(threshold int) (index, sz int) {
+ sz = pageHeaderSize
+
+ // Loop until we only have the minimum number of keys required for the second page.
+ for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
+ index = i
+ inode := n.inodes[i]
+ elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
+
+ // If we have at least the minimum number of keys and adding another
+ // node would put us over the threshold then exit and return.
+ if i >= minKeysPerPage && sz+elsize > threshold {
+ break
+ }
+
+ // Add the element size to the total size.
+ sz += elsize
+ }
+
+ return
+}
+
+// spill writes the nodes to dirty pages and splits nodes as it goes.
+// Returns an error if dirty pages cannot be allocated.
+func (n *node) spill() error {
+ var tx = n.bucket.tx
+ if n.spilled {
+ return nil
+ }
+
+ // Spill child nodes first. Child nodes can materialize sibling nodes in
+ // the case of split-merge so we cannot use a range loop. We have to check
+ // the size of the children slice on every loop iteration.
+ sort.Sort(n.children)
+ for i := 0; i < len(n.children); i++ {
+ if err := n.children[i].spill(); err != nil {
+ return err
+ }
+ }
+
+ // We no longer need the child list because it's only used for spill tracking.
+ n.children = nil
+
+ // Split nodes into appropriate sizes. The first node will always be n.
+ var nodes = n.split(tx.db.pageSize)
+ for _, node := range nodes {
+ // Add node's page to the freelist if it's not new.
+ if node.pgid > 0 {
+ tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
+ node.pgid = 0
+ }
+
+ // Allocate contiguous space for the node.
+ p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
+ if err != nil {
+ return err
+ }
+
+ // Write the node.
+ if p.id >= tx.meta.pgid {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
+ }
+ node.pgid = p.id
+ node.write(p)
+ node.spilled = true
+
+ // Insert into parent inodes.
+ if node.parent != nil {
+ var key = node.key
+ if key == nil {
+ key = node.inodes[0].key
+ }
+
+ node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
+ node.key = node.inodes[0].key
+ _assert(len(node.key) > 0, "spill: zero-length node key")
+ }
+
+ // Update the statistics.
+ tx.stats.Spill++
+ }
+
+ // If the root node split and created a new root then we need to spill that
+ // as well. We'll clear out the children to make sure it doesn't try to respill.
+ if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. 
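Keys and values handed out by the tree are views into the memory-mapped file, and the mapping can move when the database grows; dereference is the node-side defense against that. The same copy-out discipline, as a generic sketch (detach is an illustrative helper, not part of the vendored code):

// detach copies a view of mmap-backed bytes onto the Go heap so it remains
// valid after the mapping is grown, moved, or unmapped.
func detach(view []byte) []byte {
	owned := make([]byte, len(view))
	copy(owned, view)
	return owned
}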
+func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. + for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/github.com/coreos/bbolt/page.go b/vendor/github.com/coreos/bbolt/page.go new file mode 100644 index 000000000000..7651a6bf7d99 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/page.go @@ -0,0 +1,178 @@ +package bolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. 
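Everything in page.go hangs off one trick: page.ptr marks where the fixed header ends, so pageHeaderSize is just unsafe.Offsetof of that field, and casting &p.ptr reinterprets the payload bytes in place with zero copying. A standalone check of the arithmetic (same field layout, illustrative package):

package main

import (
	"fmt"
	"unsafe"
)

type page struct {
	id       uint64
	flags    uint16
	count    uint16
	overflow uint32
	ptr      uintptr
}

func main() {
	// 8 (id) + 2 (flags) + 2 (count) + 4 (overflow) = 16 bytes of header on
	// a 64-bit platform; the page payload begins at &p.ptr.
	fmt.Println(unsafe.Offsetof(page{}.ptr)) // 16
}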
+func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } else if len(b) == 0 { + return a + } + + // Create a list to hold all elements from both lists. + merged := make(pgids, 0, len(a)+len(b)) + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + merged = append(merged, follow...) 
+
+ return merged
+}
diff --git a/vendor/github.com/coreos/bbolt/tx.go b/vendor/github.com/coreos/bbolt/tx.go
new file mode 100644
index 000000000000..1cfb4cde8555
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/tx.go
@@ -0,0 +1,682 @@
+package bolt
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+ "time"
+ "unsafe"
+)
+
+// txid represents the internal transaction identifier.
+type txid uint64
+
+// Tx represents a read-only or read/write transaction on the database.
+// Read-only transactions can be used for retrieving values for keys and creating cursors.
+// Read/write transactions can create and remove buckets and create and remove keys.
+//
+// IMPORTANT: You must commit or roll back transactions when you are done with
+// them. Pages cannot be reclaimed by the writer until no more transactions
+// are using them. A long-running read transaction can cause the database to
+// grow quickly.
+type Tx struct {
+ writable bool
+ managed bool
+ db *DB
+ meta *meta
+ root Bucket
+ pages map[pgid]*page
+ stats TxStats
+ commitHandlers []func()
+
+ // WriteFlag specifies the flag for write-related methods like WriteTo().
+ // Tx opens the database file with the specified flag to copy the data.
+ //
+ // By default, the flag is unset, which works well for mostly in-memory
+ // workloads. For databases that are much larger than available RAM,
+ // set the flag to syscall.O_DIRECT to avoid thrashing the page cache.
+ WriteFlag int
+}
+
+// init initializes the transaction.
+func (tx *Tx) init(db *DB) {
+ tx.db = db
+ tx.pages = nil
+
+ // Copy the meta page since it can be changed by the writer.
+ tx.meta = &meta{}
+ db.meta().copy(tx.meta)
+
+ // Copy over the root bucket.
+ tx.root = newBucket(tx)
+ tx.root.bucket = &bucket{}
+ *tx.root.bucket = tx.meta.root
+
+ // Increment the transaction id and add a page cache for writable transactions.
+ if tx.writable {
+ tx.pages = make(map[pgid]*page)
+ tx.meta.txid += txid(1)
+ }
+}
+
+// ID returns the transaction id.
+func (tx *Tx) ID() int {
+ return int(tx.meta.txid)
+}
+
+// DB returns a reference to the database that created the transaction.
+func (tx *Tx) DB() *DB {
+ return tx.db
+}
+
+// Size returns current database size in bytes as seen by this transaction.
+func (tx *Tx) Size() int64 {
+ return int64(tx.meta.pgid) * int64(tx.db.pageSize)
+}
+
+// Writable returns whether the transaction can perform write operations.
+func (tx *Tx) Writable() bool {
+ return tx.writable
+}
+
+// Cursor creates a cursor associated with the root bucket.
+// All items in the cursor will return a nil value because all root bucket keys point to buckets.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (tx *Tx) Cursor() *Cursor {
+ return tx.root.Cursor()
+}
+
+// Stats retrieves a copy of the current transaction statistics.
+func (tx *Tx) Stats() TxStats {
+ return tx.stats
+}
+
+// Bucket retrieves a bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) Bucket(name []byte) *Bucket {
+ return tx.root.Bucket(name)
+}
+
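The IMPORTANT note on Tx describes the most common way to leak disk space: an abandoned read transaction pins every page version it can see. The conventional guard is to pair Begin with a deferred Rollback, which is harmless once Commit succeeds. A sketch using Bolt's public DB.Begin (part of the API, though its definition is not in this diff):

import bolt "github.com/coreos/bbolt"

func createWidgets(db *bolt.DB) error {
	tx, err := db.Begin(true) // true = writable
	if err != nil {
		return err
	}
	defer tx.Rollback() // returns ErrTxClosed after a successful Commit; safe to ignore

	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
		return err
	}
	return tx.Commit()
}

+// CreateBucket creates a new bucket.
+// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.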
+func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. +func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. + var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + opgid := tx.meta.pgid + + // Free the freelist and allocate new pages for it. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. 
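+ // Ordering note: dirty pages, the rewritten freelist, and a sync all
+ // completed before writeMeta above, and the new state only becomes
+ // visible once the meta page itself is durably written. A crash at any
+ // earlier point leaves the previous meta pair, and therefore the
+ // previous committed state, intact.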
+ tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. + tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. +// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { _ = f.Close() }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, f.Close() +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. 
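WriteTo above is the basis for hot backups: run inside a read transaction, it streams a point-in-time snapshot of the database while writers keep going. The usual shape, using Bolt's public DB.View helper (part of the API, though its definition is not in this diff):

import (
	"os"

	bolt "github.com/coreos/bbolt"
)

// backupTo snapshots db into a new file at path without blocking writers.
func backupTo(db *bolt.DB, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	return db.View(func(tx *bolt.Tx) error {
		_, err := tx.WriteTo(f)
		return err
	})
}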
+func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Check if any pages are double freed. + freed := make(map[pgid]bool) + for _, id := range tx.db.freelist.all() { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount++ + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + // Clear out page cache early. 
+ tx.pages = make(map[pgid]*page) + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. + ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. + buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Put small pages back to page pool. + for _, p := range pages { + // Ignore page sizes over 1 page. + // These are allocated using make() instead of the page pool. + if int(p.overflow) != 0 { + continue + } + + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] + + // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 + for i := range buf { + buf[i] = 0 + } + tx.db.pagePool.Put(buf) + } + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. + tx.stats.Write++ + + return nil +} + +// page returns a reference to the page with a given id. +// If page has been written to then a temporary buffered page is returned. +func (tx *Tx) page(id pgid) *page { + // Check the dirty pages first. + if tx.pages != nil { + if p, ok := tx.pages[id]; ok { + return p + } + } + + // Otherwise return directly from the mmap. + return tx.db.page(id) +} + +// forEachPage iterates over every page within a given page and executes a function. +func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { + p := tx.page(pgid) + + // Execute function. + fn(p, depth) + + // Recursively loop over children. + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + tx.forEachPage(elem.pgid, depth+1, fn) + } + } +} + +// Page returns page information for a given page number. +// This is only safe for concurrent use when used by a writable transaction. +func (tx *Tx) Page(id int) (*PageInfo, error) { + if tx.db == nil { + return nil, ErrTxClosed + } else if pgid(id) >= tx.meta.pgid { + return nil, nil + } + + // Build the page info. + p := tx.db.page(pgid(id)) + info := &PageInfo{ + ID: id, + Count: int(p.count), + OverflowCount: int(p.overflow), + } + + // Determine the type (or if it's free). + if tx.db.freelist.freed(pgid(id)) { + info.Type = "free" + } else { + info.Type = p.typ() + } + + return info, nil +} + +// TxStats represents statistics about the actions performed by the transaction. +type TxStats struct { + // Page statistics. 
+ PageCount int // number of page allocations
+ PageAlloc int // total bytes allocated
+
+ // Cursor statistics.
+ CursorCount int // number of cursors created
+
+ // Node statistics.
+ NodeCount int // number of node allocations
+ NodeDeref int // number of node dereferences
+
+ // Rebalance statistics.
+ Rebalance int // number of node rebalances
+ RebalanceTime time.Duration // total time spent rebalancing
+
+ // Split/Spill statistics.
+ Split int // number of nodes split
+ Spill int // number of nodes spilled
+ SpillTime time.Duration // total time spent spilling
+
+ // Write statistics.
+ Write int // number of writes performed
+ WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+ s.PageCount += other.PageCount
+ s.PageAlloc += other.PageAlloc
+ s.CursorCount += other.CursorCount
+ s.NodeCount += other.NodeCount
+ s.NodeDeref += other.NodeDeref
+ s.Rebalance += other.Rebalance
+ s.RebalanceTime += other.RebalanceTime
+ s.Split += other.Split
+ s.Spill += other.Spill
+ s.SpillTime += other.SpillTime
+ s.Write += other.Write
+ s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
+func (s *TxStats) Sub(other *TxStats) TxStats {
+ var diff TxStats
+ diff.PageCount = s.PageCount - other.PageCount
+ diff.PageAlloc = s.PageAlloc - other.PageAlloc
+ diff.CursorCount = s.CursorCount - other.CursorCount
+ diff.NodeCount = s.NodeCount - other.NodeCount
+ diff.NodeDeref = s.NodeDeref - other.NodeDeref
+ diff.Rebalance = s.Rebalance - other.Rebalance
+ diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
+ diff.Split = s.Split - other.Split
+ diff.Spill = s.Spill - other.Spill
+ diff.SpillTime = s.SpillTime - other.SpillTime
+ diff.Write = s.Write - other.Write
+ diff.WriteTime = s.WriteTime - other.WriteTime
+ return diff
+}
diff --git a/vendor/github.com/coreos/etcd/Documentation/README.md b/vendor/github.com/coreos/etcd/Documentation/README.md
new file mode 120000
index 000000000000..8828313f5be6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Documentation/README.md
@@ -0,0 +1 @@
+docs.md
\ No newline at end of file
diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE new file mode 100644 index 000000000000..b39ddfa5cbde --- /dev/null +++ b/vendor/github.com/coreos/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
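The TxStats helpers vendored in the bolt hunk above are meant for interval measurement: snapshot the counters, wait, snapshot again, and Sub returns only what happened in between. A minimal sketch of that pattern, assuming the type comes from the vendored coreos/bbolt package (the import path, database path, and sleep interval below are illustrative, not part of this patch):

package main

import (
	"fmt"
	"log"
	"time"

	bolt "github.com/coreos/bbolt"
)

func main() {
	// open a throwaway database; the path is illustrative
	db, err := bolt.Open("stats.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	prev := db.Stats().TxStats             // first snapshot
	time.Sleep(10 * time.Second)           // measurement window
	delta := db.Stats().TxStats.Sub(&prev) // counters for the window only
	fmt.Printf("writes=%d writeTime=%v\n", delta.Write, delta.WriteTime)
}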
diff --git a/vendor/github.com/coreos/etcd/alarm/BUILD b/vendor/github.com/coreos/etcd/alarm/BUILD new file mode 100644 index 000000000000..c58cde9841e9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/alarm/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["alarms.go"], + importpath = "github.com/coreos/etcd/alarm", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/alarm/alarms.go b/vendor/github.com/coreos/etcd/alarm/alarms.go new file mode 100644 index 000000000000..4f0ebe93f3bf --- /dev/null +++ b/vendor/github.com/coreos/etcd/alarm/alarms.go @@ -0,0 +1,152 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package alarm manages health status alarms in etcd. +package alarm + +import ( + "sync" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/pkg/capnslog" +) + +var ( + alarmBucketName = []byte("alarm") + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "alarm") +) + +type BackendGetter interface { + Backend() backend.Backend +} + +type alarmSet map[types.ID]*pb.AlarmMember + +// AlarmStore persists alarms to the backend. 
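+// It keeps active alarms in an in-memory map keyed by alarm type and mirrors
+// each activation into the "alarm" backend bucket, so that restore can
+// rebuild the map after a restart.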
+type AlarmStore struct { + mu sync.Mutex + types map[pb.AlarmType]alarmSet + + bg BackendGetter +} + +func NewAlarmStore(bg BackendGetter) (*AlarmStore, error) { + ret := &AlarmStore{types: make(map[pb.AlarmType]alarmSet), bg: bg} + err := ret.restore() + return ret, err +} + +func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember { + a.mu.Lock() + defer a.mu.Unlock() + + newAlarm := &pb.AlarmMember{MemberID: uint64(id), Alarm: at} + if m := a.addToMap(newAlarm); m != newAlarm { + return m + } + + v, err := newAlarm.Marshal() + if err != nil { + plog.Panicf("failed to marshal alarm member") + } + + b := a.bg.Backend() + b.BatchTx().Lock() + b.BatchTx().UnsafePut(alarmBucketName, v, nil) + b.BatchTx().Unlock() + + return newAlarm +} + +func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember { + a.mu.Lock() + defer a.mu.Unlock() + + t := a.types[at] + if t == nil { + t = make(alarmSet) + a.types[at] = t + } + m := t[id] + if m == nil { + return nil + } + + delete(t, id) + + v, err := m.Marshal() + if err != nil { + plog.Panicf("failed to marshal alarm member") + } + + b := a.bg.Backend() + b.BatchTx().Lock() + b.BatchTx().UnsafeDelete(alarmBucketName, v) + b.BatchTx().Unlock() + + return m +} + +func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) { + a.mu.Lock() + defer a.mu.Unlock() + if at == pb.AlarmType_NONE { + for _, t := range a.types { + for _, m := range t { + ret = append(ret, m) + } + } + return ret + } + for _, m := range a.types[at] { + ret = append(ret, m) + } + return ret +} + +func (a *AlarmStore) restore() error { + b := a.bg.Backend() + tx := b.BatchTx() + + tx.Lock() + tx.UnsafeCreateBucket(alarmBucketName) + err := tx.UnsafeForEach(alarmBucketName, func(k, v []byte) error { + var m pb.AlarmMember + if err := m.Unmarshal(k); err != nil { + return err + } + a.addToMap(&m) + return nil + }) + tx.Unlock() + + b.ForceCommit() + return err +} + +func (a *AlarmStore) addToMap(newAlarm *pb.AlarmMember) *pb.AlarmMember { + t := a.types[newAlarm.Alarm] + if t == nil { + t = make(alarmSet) + a.types[newAlarm.Alarm] = t + } + m := t[types.ID(newAlarm.MemberID)] + if m != nil { + return m + } + t[types.ID(newAlarm.MemberID)] = newAlarm + return newAlarm +} diff --git a/vendor/github.com/coreos/etcd/auth/BUILD b/vendor/github.com/coreos/etcd/auth/BUILD new file mode 100644 index 000000000000..892b00a396f6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/BUILD @@ -0,0 +1,44 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "jwt.go", + "range_perm_cache.go", + "simple_token.go", + "store.go", + ], + importpath = "github.com/coreos/etcd/auth", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/adt:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/dgrijalva/jwt-go:go_default_library", + "//vendor/golang.org/x/crypto/bcrypt:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", + "//vendor/google.golang.org/grpc/metadata:go_default_library", + 
"//vendor/google.golang.org/grpc/peer:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/auth/authpb:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/auth/authpb/BUILD b/vendor/github.com/coreos/etcd/auth/authpb/BUILD new file mode 100644 index 000000000000..36b62358a76e --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/authpb/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["auth.pb.go"], + importpath = "github.com/coreos/etcd/auth/authpb", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go new file mode 100644 index 000000000000..009ebda70ca5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go @@ -0,0 +1,824 @@ +// Code generated by protoc-gen-gogo. +// source: auth.proto +// DO NOT EDIT! + +/* + Package authpb is a generated protocol buffer package. + + It is generated from these files: + auth.proto + + It has these top-level messages: + User + Permission + Role +*/ +package authpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Permission_Type int32 + +const ( + READ Permission_Type = 0 + WRITE Permission_Type = 1 + READWRITE Permission_Type = 2 +) + +var Permission_Type_name = map[int32]string{ + 0: "READ", + 1: "WRITE", + 2: "READWRITE", +} +var Permission_Type_value = map[string]int32{ + "READ": 0, + "WRITE": 1, + "READWRITE": 2, +} + +func (x Permission_Type) String() string { + return proto.EnumName(Permission_Type_name, int32(x)) +} +func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} } + +// User is a single entry in the bucket authUsers +type User struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} } + +// Permission is a single entity +type Permission struct { + PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} } + +// Role is a single entry in the bucket authRoles +type Role struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"` +} + +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} } + +func init() { + proto.RegisterType((*User)(nil), "authpb.User") + proto.RegisterType((*Permission)(nil), "authpb.Permission") + proto.RegisterType((*Role)(nil), "authpb.Role") + proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) +} +func (m *User) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *User) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *Permission) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *Permission) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PermType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + return i, nil +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Role) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.KeyPermission) > 0 { + for _, msg := range m.KeyPermission { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *User) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovAuth(uint64(l)) + } + } + return n +} + +func (m *Permission) Size() (n int) { + var l int + _ = l + if m.PermType != 0 { + n += 1 + sovAuth(uint64(m.PermType)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.KeyPermission) > 0 { + for _, e := range m.KeyPermission { + l = e.Size() + n += 1 + l + sovAuth(uint64(l)) + } + } + return n +} + +func sovAuth(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozAuth(x uint64) (n int) { + return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *User) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: User: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...) + if m.Password == nil { + m.Password = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Permission) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Permission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType) + } + m.PermType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PermType |= (Permission_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
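+			// a zero-length Name field leaves m.Name nil after the append above;
+			// the check below normalizes it to a non-nil empty slice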
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyPermission = append(m.KeyPermission, &Permission{}) + if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAuth(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthAuth + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAuth(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } + +var fileDescriptorAuth = []byte{ + // 288 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, + 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, + 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, + 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, + 0x64, 0x43, 
0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d, + 0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd, + 0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51, + 0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef, + 0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00, + 0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc, + 0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70, + 0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41, + 0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc, + 0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b, + 0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1, + 0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee, + 0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4, + 0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto new file mode 100644 index 000000000000..001d33435483 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; +package authpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +// User is a single entry in the bucket authUsers +message User { + bytes name = 1; + bytes password = 2; + repeated string roles = 3; +} + +// Permission is a single entity +message Permission { + enum Type { + READ = 0; + WRITE = 1; + READWRITE = 2; + } + Type permType = 1; + + bytes key = 2; + bytes range_end = 3; +} + +// Role is a single entry in the bucket authRoles +message Role { + bytes name = 1; + + repeated Permission keyPermission = 2; +} diff --git a/vendor/github.com/coreos/etcd/auth/doc.go b/vendor/github.com/coreos/etcd/auth/doc.go new file mode 100644 index 000000000000..72741a107745 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package auth provides client role authentication for accessing keys in etcd. 
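+//
+// Tokens are issued through the TokenProvider interface defined in store.go;
+// this package ships two implementations: an in-memory simple token
+// (simple_token.go) and a JWT-based one (jwt.go).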
+package auth
diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go
new file mode 100644
index 000000000000..214ae48c83a9
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/jwt.go
@@ -0,0 +1,137 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+	"crypto/rsa"
+	"io/ioutil"
+
+	jwt "github.com/dgrijalva/jwt-go"
+	"golang.org/x/net/context"
+)
+
+type tokenJWT struct {
+	signMethod string
+	signKey    *rsa.PrivateKey
+	verifyKey  *rsa.PublicKey
+}
+
+func (t *tokenJWT) enable()                         {}
+func (t *tokenJWT) disable()                        {}
+func (t *tokenJWT) invalidateUser(string)           {}
+func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil }
+
+func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
+	// rev isn't used in JWT, it is only used in simple token
+	var (
+		username string
+		revision uint64
+	)
+
+	parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
+		return t.verifyKey, nil
+	})
+
+	switch err.(type) {
+	case nil:
+		if !parsed.Valid {
+			plog.Warningf("invalid jwt token: %s", token)
+			return nil, false
+		}
+
+		claims := parsed.Claims.(jwt.MapClaims)
+
+		username = claims["username"].(string)
+		revision = uint64(claims["revision"].(float64))
+	default:
+		plog.Warningf("failed to parse jwt token: %s", err)
+		return nil, false
+	}
+
+	return &AuthInfo{Username: username, Revision: revision}, true
+}
+
+func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
+	// Future work: letting a jwt token include permission information would be
+	// useful for permission checking on the proxy side.
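+	// the token embeds the username and the auth store revision as JWT
+	// claims; info above parses them back out into an AuthInfo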
+ tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod), + jwt.MapClaims{ + "username": username, + "revision": revision, + }) + + token, err := tk.SignedString(t.signKey) + if err != nil { + plog.Debugf("failed to sign jwt token: %s", err) + return "", err + } + + plog.Debugf("jwt token: %s", token) + + return token, err +} + +func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) { + for k, v := range opts { + switch k { + case "sign-method": + jwtSignMethod = v + case "pub-key": + jwtPubKeyPath = v + case "priv-key": + jwtPrivKeyPath = v + default: + plog.Errorf("unknown token specific option: %s", k) + return "", "", "", ErrInvalidAuthOpts + } + } + + return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil +} + +func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) { + jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts) + if err != nil { + return nil, ErrInvalidAuthOpts + } + + t := &tokenJWT{} + + t.signMethod = jwtSignMethod + + verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath) + if err != nil { + plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err) + return nil, err + } + t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) + if err != nil { + plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err) + return nil, err + } + + signBytes, err := ioutil.ReadFile(jwtPrivKeyPath) + if err != nil { + plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err) + return nil, err + } + t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) + if err != nil { + plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err) + return nil, err + } + + return t, nil +} diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go new file mode 100644 index 000000000000..691b65ba38e7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go @@ -0,0 +1,133 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package auth + +import ( + "github.com/coreos/etcd/auth/authpb" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/adt" +) + +func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions { + user := getUser(tx, userName) + if user == nil { + plog.Errorf("invalid user name %s", userName) + return nil + } + + readPerms := &adt.IntervalTree{} + writePerms := &adt.IntervalTree{} + + for _, roleName := range user.Roles { + role := getRole(tx, roleName) + if role == nil { + continue + } + + for _, perm := range role.KeyPermission { + var ivl adt.Interval + var rangeEnd []byte + + if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 { + rangeEnd = perm.RangeEnd + } + + if len(perm.RangeEnd) != 0 { + ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd) + } else { + ivl = adt.NewBytesAffinePoint(perm.Key) + } + + switch perm.PermType { + case authpb.READWRITE: + readPerms.Insert(ivl, struct{}{}) + writePerms.Insert(ivl, struct{}{}) + + case authpb.READ: + readPerms.Insert(ivl, struct{}{}) + + case authpb.WRITE: + writePerms.Insert(ivl, struct{}{}) + } + } + } + + return &unifiedRangePermissions{ + readPerms: readPerms, + writePerms: writePerms, + } +} + +func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { + if len(rangeEnd) == 1 && rangeEnd[0] == 0 { + rangeEnd = nil + } + + ivl := adt.NewBytesAffineInterval(key, rangeEnd) + switch permtyp { + case authpb.READ: + return cachedPerms.readPerms.Contains(ivl) + case authpb.WRITE: + return cachedPerms.writePerms.Contains(ivl) + default: + plog.Panicf("unknown auth type: %v", permtyp) + } + return false +} + +func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { + pt := adt.NewBytesAffinePoint(key) + switch permtyp { + case authpb.READ: + return cachedPerms.readPerms.Intersects(pt) + case authpb.WRITE: + return cachedPerms.writePerms.Intersects(pt) + default: + plog.Panicf("unknown auth type: %v", permtyp) + } + return false +} + +func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { + // assumption: tx is Lock()ed + _, ok := as.rangePermCache[userName] + if !ok { + perms := getMergedPerms(tx, userName) + if perms == nil { + plog.Errorf("failed to create a unified permission of user %s", userName) + return false + } + as.rangePermCache[userName] = perms + } + + if len(rangeEnd) == 0 { + return checkKeyPoint(as.rangePermCache[userName], key, permtyp) + } + + return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp) +} + +func (as *authStore) clearCachedPerm() { + as.rangePermCache = make(map[string]*unifiedRangePermissions) +} + +func (as *authStore) invalidateCachedPerm(userName string) { + delete(as.rangePermCache, userName) +} + +type unifiedRangePermissions struct { + readPerms *adt.IntervalTree + writePerms *adt.IntervalTree +} diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go b/vendor/github.com/coreos/etcd/auth/simple_token.go new file mode 100644 index 000000000000..94d92a115e24 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/simple_token.go @@ -0,0 +1,220 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+// CAUTION: This random number based token mechanism is only for testing purposes.
+// A JWT based mechanism will be added in the near future.
+
+import (
+	"crypto/rand"
+	"fmt"
+	"math/big"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+const (
+	letters                  = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	defaultSimpleTokenLength = 16
+)
+
+// var for testing purposes
+var (
+	simpleTokenTTL           = 5 * time.Minute
+	simpleTokenTTLResolution = 1 * time.Second
+)
+
+type simpleTokenTTLKeeper struct {
+	tokens          map[string]time.Time
+	donec           chan struct{}
+	stopc           chan struct{}
+	deleteTokenFunc func(string)
+	mu              *sync.Mutex
+}
+
+func (tm *simpleTokenTTLKeeper) stop() {
+	select {
+	case tm.stopc <- struct{}{}:
+	case <-tm.donec:
+	}
+	<-tm.donec
+}
+
+func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
+	tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+}
+
+func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
+	if _, ok := tm.tokens[token]; ok {
+		tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+	}
+}
+
+func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
+	delete(tm.tokens, token)
+}
+
+func (tm *simpleTokenTTLKeeper) run() {
+	tokenTicker := time.NewTicker(simpleTokenTTLResolution)
+	defer func() {
+		tokenTicker.Stop()
+		close(tm.donec)
+	}()
+	for {
+		select {
+		case <-tokenTicker.C:
+			nowtime := time.Now()
+			tm.mu.Lock()
+			for t, tokenendtime := range tm.tokens {
+				if nowtime.After(tokenendtime) {
+					tm.deleteTokenFunc(t)
+					delete(tm.tokens, t)
+				}
+			}
+			tm.mu.Unlock()
+		case <-tm.stopc:
+			return
+		}
+	}
+}
+
+type tokenSimple struct {
+	indexWaiter       func(uint64) <-chan struct{}
+	simpleTokenKeeper *simpleTokenTTLKeeper
+	simpleTokensMu    sync.Mutex
+	simpleTokens      map[string]string // token -> username
+}
+
+func (t *tokenSimple) genTokenPrefix() (string, error) {
+	ret := make([]byte, defaultSimpleTokenLength)
+
+	for i := 0; i < defaultSimpleTokenLength; i++ {
+		bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
+		if err != nil {
+			return "", err
+		}
+
+		ret[i] = letters[bInt.Int64()]
+	}
+
+	return string(ret), nil
+}
+
+func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
+	t.simpleTokensMu.Lock()
+	_, ok := t.simpleTokens[token]
+	if ok {
+		plog.Panicf("token %s is already used", token)
+	}
+
+	t.simpleTokens[token] = username
+	t.simpleTokenKeeper.addSimpleToken(token)
+	t.simpleTokensMu.Unlock()
+}
+
+func (t *tokenSimple) invalidateUser(username string) {
+	if t.simpleTokenKeeper == nil {
+		return
+	}
+	t.simpleTokensMu.Lock()
+	for token, name := range t.simpleTokens {
+		if strings.Compare(name, username) == 0 {
+			delete(t.simpleTokens, token)
+			t.simpleTokenKeeper.deleteSimpleToken(token)
+		}
+	}
+	t.simpleTokensMu.Unlock()
+}
+
+func (t *tokenSimple) enable() {
+	delf := func(tk string) {
+		if username, ok := t.simpleTokens[tk]; ok {
+			plog.Infof("deleting token %s for user %s", tk, username)
+			delete(t.simpleTokens, tk)
+		}
+	}
+	t.simpleTokenKeeper = &simpleTokenTTLKeeper{
+		tokens:          make(map[string]time.Time),
+		donec:           make(chan struct{}),
+ stopc: make(chan struct{}), + deleteTokenFunc: delf, + mu: &t.simpleTokensMu, + } + go t.simpleTokenKeeper.run() +} + +func (t *tokenSimple) disable() { + t.simpleTokensMu.Lock() + tk := t.simpleTokenKeeper + t.simpleTokenKeeper = nil + t.simpleTokens = make(map[string]string) // invalidate all tokens + t.simpleTokensMu.Unlock() + if tk != nil { + tk.stop() + } +} + +func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) { + if !t.isValidSimpleToken(ctx, token) { + return nil, false + } + t.simpleTokensMu.Lock() + username, ok := t.simpleTokens[token] + if ok && t.simpleTokenKeeper != nil { + t.simpleTokenKeeper.resetSimpleToken(token) + } + t.simpleTokensMu.Unlock() + return &AuthInfo{Username: username, Revision: revision}, ok +} + +func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) { + // rev isn't used in simple token, it is only used in JWT + index := ctx.Value("index").(uint64) + simpleToken := ctx.Value("simpleToken").(string) + token := fmt.Sprintf("%s.%d", simpleToken, index) + t.assignSimpleTokenToUser(username, token) + + return token, nil +} + +func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool { + splitted := strings.Split(token, ".") + if len(splitted) != 2 { + return false + } + index, err := strconv.Atoi(splitted[1]) + if err != nil { + return false + } + + select { + case <-t.indexWaiter(uint64(index)): + return true + case <-ctx.Done(): + } + + return false +} + +func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple { + return &tokenSimple{ + simpleTokens: make(map[string]string), + indexWaiter: indexWaiter, + } +} diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go new file mode 100644 index 000000000000..3fac7f5a6fd7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/store.go @@ -0,0 +1,1059 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package auth
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"sort"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"github.com/coreos/etcd/auth/authpb"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/pkg/capnslog"
+	"golang.org/x/crypto/bcrypt"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+)
+
+var (
+	enableFlagKey = []byte("authEnabled")
+	authEnabled   = []byte{1}
+	authDisabled  = []byte{0}
+
+	revisionKey = []byte("authRevision")
+
+	authBucketName      = []byte("auth")
+	authUsersBucketName = []byte("authUsers")
+	authRolesBucketName = []byte("authRoles")
+
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth")
+
+	ErrRootUserNotExist     = errors.New("auth: root user does not exist")
+	ErrRootRoleNotExist     = errors.New("auth: root user does not have root role")
+	ErrUserAlreadyExist     = errors.New("auth: user already exists")
+	ErrUserEmpty            = errors.New("auth: user name is empty")
+	ErrUserNotFound         = errors.New("auth: user not found")
+	ErrRoleAlreadyExist     = errors.New("auth: role already exists")
+	ErrRoleNotFound         = errors.New("auth: role not found")
+	ErrAuthFailed           = errors.New("auth: authentication failed, invalid user ID or password")
+	ErrPermissionDenied     = errors.New("auth: permission denied")
+	ErrRoleNotGranted       = errors.New("auth: role is not granted to the user")
+	ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
+	ErrAuthNotEnabled       = errors.New("auth: authentication is not enabled")
+	ErrAuthOldRevision      = errors.New("auth: revision in header is old")
+	ErrInvalidAuthToken     = errors.New("auth: invalid auth token")
+	ErrInvalidAuthOpts      = errors.New("auth: invalid auth options")
+	ErrInvalidAuthMgmt      = errors.New("auth: invalid auth management")
+
+	// BcryptCost is the algorithm cost / strength for hashing auth passwords
+	BcryptCost = bcrypt.DefaultCost
+)
+
+const (
+	rootUser = "root"
+	rootRole = "root"
+
+	revBytesLen = 8
+)
+
+type AuthInfo struct {
+	Username string
+	Revision uint64
+}
+
+type AuthStore interface {
+	// AuthEnable turns on the authentication feature
+	AuthEnable() error
+
+	// AuthDisable turns off the authentication feature
+	AuthDisable()
+
+	// Authenticate does authentication based on a given username and password
+	Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error)
+
+	// Recover recovers the state of auth store from the given backend
+	Recover(b backend.Backend)
+
+	// UserAdd adds a new user
+	UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+
+	// UserDelete deletes a user
+	UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+
+	// UserChangePassword changes a password of a user
+	UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+
+	// UserGrantRole grants a role to the user
+	UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+
+	// UserGet gets the detailed information of a user
+	UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+
+	// UserRevokeRole revokes a role of a user
+	UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+
+	// RoleAdd adds a new role
+	RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+
+	// RoleGrantPermission grants a permission to a role
+	RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+
+	// RoleGet gets the detailed information of a role
+	RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+
+	// RoleRevokePermission revokes a permission from a role
+	RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+
+	// RoleDelete deletes a role
+	RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+
+	// UserList gets a list of all users
+	UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+
+	// RoleList gets a list of all roles
+	RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+
+	// IsPutPermitted checks put permission of the user
+	IsPutPermitted(authInfo *AuthInfo, key []byte) error
+
+	// IsRangePermitted checks range permission of the user
+	IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
+
+	// IsDeleteRangePermitted checks delete-range permission of the user
+	IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
+
+	// IsAdminPermitted checks admin permission of the user
+	IsAdminPermitted(authInfo *AuthInfo) error
+
+	// GenTokenPrefix produces a random string in the case of the simple token;
+	// in the case of JWT, it produces an empty string
+	GenTokenPrefix() (string, error)
+
+	// Revision gets current revision of authStore
+	Revision() uint64
+
+	// CheckPassword checks that a given pair of username and password is correct
+	CheckPassword(username, password string) (uint64, error)
+
+	// Close does cleanup of AuthStore
+	Close() error
+
+	// AuthInfoFromCtx gets AuthInfo from gRPC's context
+	AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
+
+	// AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context
+	AuthInfoFromTLS(ctx context.Context) *AuthInfo
+}
+
+type TokenProvider interface {
+	info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool)
+	assign(ctx context.Context, username string, revision uint64) (string, error)
+	enable()
+	disable()
+
+	invalidateUser(string)
+	genTokenPrefix() (string, error)
+}
+
+type authStore struct {
+	// atomic operations; need 64-bit align, or 32-bit tests will crash
+	revision uint64
+
+	be        backend.Backend
+	enabled   bool
+	enabledMu sync.RWMutex
+
+	rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
+
+	tokenProvider TokenProvider
+}
+
+func (as *authStore) AuthEnable() error {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if as.enabled {
+		plog.Noticef("Authentication already enabled")
+		return nil
+	}
+	b := as.be
+	tx := b.BatchTx()
+	tx.Lock()
+	defer func() {
+		tx.Unlock()
+		b.ForceCommit()
+	}()
+
+	u := getUser(tx, rootUser)
+	if u == nil {
+		return ErrRootUserNotExist
+	}
+
+	if !hasRootRole(u) {
+		return ErrRootRoleNotExist
+	}
+
+	tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
+
+	as.enabled = true
+	as.tokenProvider.enable()
+
+	as.rangePermCache = make(map[string]*unifiedRangePermissions)
+
+	as.setRevision(getRevision(tx))
+
+	plog.Noticef("Authentication enabled")
+
+	return nil
+}
+
+func (as *authStore) AuthDisable() {
+	as.enabledMu.Lock()
+	defer as.enabledMu.Unlock()
+	if !as.enabled {
+		return
+	}
+	b := as.be
+	tx := b.BatchTx()
+	tx.Lock()
+	tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
+	as.commitRevision(tx)
+	tx.Unlock()
+	b.ForceCommit()
+
+	as.enabled = false
+	as.tokenProvider.disable()
+
+	
plog.Noticef("Authentication disabled") +} + +func (as *authStore) Close() error { + as.enabledMu.Lock() + defer as.enabledMu.Unlock() + if !as.enabled { + return nil + } + as.tokenProvider.disable() + return nil +} + +func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) { + if !as.isAuthEnabled() { + return nil, ErrAuthNotEnabled + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, username) + if user == nil { + return nil, ErrAuthFailed + } + + // Password checking is already performed in the API layer, so we don't need to check for now. + // Staleness of password can be detected with OCC in the API layer, too. + + token, err := as.tokenProvider.assign(ctx, username, as.Revision()) + if err != nil { + return nil, err + } + + plog.Debugf("authorized %s, token is %s", username, token) + return &pb.AuthenticateResponse{Token: token}, nil +} + +func (as *authStore) CheckPassword(username, password string) (uint64, error) { + if !as.isAuthEnabled() { + return 0, ErrAuthNotEnabled + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, username) + if user == nil { + return 0, ErrAuthFailed + } + + if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil { + plog.Noticef("authentication failed, invalid password for user %s", username) + return 0, ErrAuthFailed + } + + return getRevision(tx), nil +} + +func (as *authStore) Recover(be backend.Backend) { + enabled := false + as.be = be + tx := be.BatchTx() + tx.Lock() + _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0) + if len(vs) == 1 { + if bytes.Equal(vs[0], authEnabled) { + enabled = true + } + } + + as.setRevision(getRevision(tx)) + + tx.Unlock() + + as.enabledMu.Lock() + as.enabled = enabled + as.enabledMu.Unlock() +} + +func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + if len(r.Name) == 0 { + return nil, ErrUserEmpty + } + + hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) + if err != nil { + plog.Errorf("failed to hash password: %s", err) + return nil, err + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, r.Name) + if user != nil { + return nil, ErrUserAlreadyExist + } + + newUser := &authpb.User{ + Name: []byte(r.Name), + Password: hashed, + } + + putUser(tx, newUser) + + as.commitRevision(tx) + + plog.Noticef("added a new user: %s", r.Name) + + return &pb.AuthUserAddResponse{}, nil +} + +func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + if as.enabled && strings.Compare(r.Name, rootUser) == 0 { + plog.Errorf("the user root must not be deleted") + return nil, ErrInvalidAuthMgmt + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, r.Name) + if user == nil { + return nil, ErrUserNotFound + } + + delUser(tx, r.Name) + + as.commitRevision(tx) + + as.invalidateCachedPerm(r.Name) + as.tokenProvider.invalidateUser(r.Name) + + plog.Noticef("deleted a user: %s", r.Name) + + return &pb.AuthUserDeleteResponse{}, nil +} + +func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + // TODO(mitake): measure the cost of bcrypt.GenerateFromPassword() + // If the cost is too high, we should move the encryption to outside of the raft + hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) + if err != nil { + plog.Errorf("failed to 
hash password: %s", err) + return nil, err + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, r.Name) + if user == nil { + return nil, ErrUserNotFound + } + + updatedUser := &authpb.User{ + Name: []byte(r.Name), + Roles: user.Roles, + Password: hashed, + } + + putUser(tx, updatedUser) + + as.commitRevision(tx) + + as.invalidateCachedPerm(r.Name) + as.tokenProvider.invalidateUser(r.Name) + + plog.Noticef("changed a password of a user: %s", r.Name) + + return &pb.AuthUserChangePasswordResponse{}, nil +} + +func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, r.User) + if user == nil { + return nil, ErrUserNotFound + } + + if r.Role != rootRole { + role := getRole(tx, r.Role) + if role == nil { + return nil, ErrRoleNotFound + } + } + + idx := sort.SearchStrings(user.Roles, r.Role) + if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 { + plog.Warningf("user %s is already granted role %s", r.User, r.Role) + return &pb.AuthUserGrantRoleResponse{}, nil + } + + user.Roles = append(user.Roles, r.Role) + sort.Sort(sort.StringSlice(user.Roles)) + + putUser(tx, user) + + as.invalidateCachedPerm(r.User) + + as.commitRevision(tx) + + plog.Noticef("granted role %s to user %s", r.Role, r.User) + return &pb.AuthUserGrantRoleResponse{}, nil +} + +func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + var resp pb.AuthUserGetResponse + + user := getUser(tx, r.Name) + if user == nil { + return nil, ErrUserNotFound + } + resp.Roles = append(resp.Roles, user.Roles...) + return &resp, nil +} + +func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + var resp pb.AuthUserListResponse + + users := getAllUsers(tx) + + for _, u := range users { + resp.Users = append(resp.Users, string(u.Name)) + } + + return &resp, nil +} + +func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 { + plog.Errorf("the role root must not be revoked from the user root") + return nil, ErrInvalidAuthMgmt + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, r.Name) + if user == nil { + return nil, ErrUserNotFound + } + + updatedUser := &authpb.User{ + Name: user.Name, + Password: user.Password, + } + + for _, role := range user.Roles { + if strings.Compare(role, r.Role) != 0 { + updatedUser.Roles = append(updatedUser.Roles, role) + } + } + + if len(updatedUser.Roles) == len(user.Roles) { + return nil, ErrRoleNotGranted + } + + putUser(tx, updatedUser) + + as.invalidateCachedPerm(r.Name) + + as.commitRevision(tx) + + plog.Noticef("revoked role %s from user %s", r.Role, r.Name) + return &pb.AuthUserRevokeRoleResponse{}, nil +} + +func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + var resp pb.AuthRoleGetResponse + + role := getRole(tx, r.Role) + if role == nil { + return nil, ErrRoleNotFound + } + resp.Perm = append(resp.Perm, role.KeyPermission...) 
+ return &resp, nil +} + +func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + var resp pb.AuthRoleListResponse + + roles := getAllRoles(tx) + + for _, r := range roles { + resp.Roles = append(resp.Roles, string(r.Name)) + } + + return &resp, nil +} + +func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + role := getRole(tx, r.Role) + if role == nil { + return nil, ErrRoleNotFound + } + + updatedRole := &authpb.Role{ + Name: role.Name, + } + + for _, perm := range role.KeyPermission { + if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) { + updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm) + } + } + + if len(role.KeyPermission) == len(updatedRole.KeyPermission) { + return nil, ErrPermissionNotGranted + } + + putRole(tx, updatedRole) + + // TODO(mitake): currently single role update invalidates every cache + // It should be optimized. + as.clearCachedPerm() + + as.commitRevision(tx) + + plog.Noticef("revoked key %s from role %s", r.Key, r.Role) + return &pb.AuthRoleRevokePermissionResponse{}, nil +} + +func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + if as.enabled && strings.Compare(r.Role, rootRole) == 0 { + plog.Errorf("the role root must not be deleted") + return nil, ErrInvalidAuthMgmt + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + role := getRole(tx, r.Role) + if role == nil { + return nil, ErrRoleNotFound + } + + delRole(tx, r.Role) + + users := getAllUsers(tx) + for _, user := range users { + updatedUser := &authpb.User{ + Name: user.Name, + Password: user.Password, + } + + for _, role := range user.Roles { + if strings.Compare(role, r.Role) != 0 { + updatedUser.Roles = append(updatedUser.Roles, role) + } + } + + if len(updatedUser.Roles) == len(user.Roles) { + continue + } + + putUser(tx, updatedUser) + + as.invalidateCachedPerm(string(user.Name)) + } + + as.commitRevision(tx) + + plog.Noticef("deleted role %s", r.Role) + return &pb.AuthRoleDeleteResponse{}, nil +} + +func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + role := getRole(tx, r.Name) + if role != nil { + return nil, ErrRoleAlreadyExist + } + + newRole := &authpb.Role{ + Name: []byte(r.Name), + } + + putRole(tx, newRole) + + as.commitRevision(tx) + + plog.Noticef("Role %s is created", r.Name) + + return &pb.AuthRoleAddResponse{}, nil +} + +func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) { + return as.tokenProvider.info(ctx, token, as.Revision()) +} + +type permSlice []*authpb.Permission + +func (perms permSlice) Len() int { + return len(perms) +} + +func (perms permSlice) Less(i, j int) bool { + return bytes.Compare(perms[i].Key, perms[j].Key) < 0 +} + +func (perms permSlice) Swap(i, j int) { + perms[i], perms[j] = perms[j], perms[i] +} + +func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + role := getRole(tx, r.Name) + if role == nil { + return nil, ErrRoleNotFound + } + + idx := sort.Search(len(role.KeyPermission), func(i int) bool { + return bytes.Compare(role.KeyPermission[i].Key, 
[]byte(r.Perm.Key)) >= 0 + }) + + if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) { + // update existing permission + role.KeyPermission[idx].PermType = r.Perm.PermType + } else { + // append new permission to the role + newPerm := &authpb.Permission{ + Key: []byte(r.Perm.Key), + RangeEnd: []byte(r.Perm.RangeEnd), + PermType: r.Perm.PermType, + } + + role.KeyPermission = append(role.KeyPermission, newPerm) + sort.Sort(permSlice(role.KeyPermission)) + } + + putRole(tx, role) + + // TODO(mitake): currently single role update invalidates every cache + // It should be optimized. + as.clearCachedPerm() + + as.commitRevision(tx) + + plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)]) + + return &pb.AuthRoleGrantPermissionResponse{}, nil +} + +func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error { + // TODO(mitake): this function would be costly so we need a caching mechanism + if !as.isAuthEnabled() { + return nil + } + + // only gets rev == 0 when passed AuthInfo{}; no user given + if revision == 0 { + return ErrUserEmpty + } + + if revision < as.Revision() { + return ErrAuthOldRevision + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, userName) + if user == nil { + plog.Errorf("invalid user name %s for permission checking", userName) + return ErrPermissionDenied + } + + // root role should have permission on all ranges + if hasRootRole(user) { + return nil + } + + if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) { + return nil + } + + return ErrPermissionDenied +} + +func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error { + return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE) +} + +func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { + return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ) +} + +func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { + return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE) +} + +func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { + if !as.isAuthEnabled() { + return nil + } + if authInfo == nil { + return ErrUserEmpty + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + u := getUser(tx, authInfo.Username) + if u == nil { + return ErrUserNotFound + } + + if !hasRootRole(u) { + return ErrPermissionDenied + } + + return nil +} + +func getUser(tx backend.BatchTx, username string) *authpb.User { + _, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0) + if len(vs) == 0 { + return nil + } + + user := &authpb.User{} + err := user.Unmarshal(vs[0]) + if err != nil { + plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err) + } + return user +} + +func getAllUsers(tx backend.BatchTx) []*authpb.User { + _, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1) + if len(vs) == 0 { + return nil + } + + var users []*authpb.User + + for _, v := range vs { + user := &authpb.User{} + err := user.Unmarshal(v) + if err != nil { + plog.Panicf("failed to unmarshal user struct: %s", err) + } + + users = append(users, user) + } + + return users +} + +func putUser(tx backend.BatchTx, user 
*authpb.User) { + b, err := user.Marshal() + if err != nil { + plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err) + } + tx.UnsafePut(authUsersBucketName, user.Name, b) +} + +func delUser(tx backend.BatchTx, username string) { + tx.UnsafeDelete(authUsersBucketName, []byte(username)) +} + +func getRole(tx backend.BatchTx, rolename string) *authpb.Role { + _, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0) + if len(vs) == 0 { + return nil + } + + role := &authpb.Role{} + err := role.Unmarshal(vs[0]) + if err != nil { + plog.Panicf("failed to unmarshal role struct (name: %s): %s", rolename, err) + } + return role +} + +func getAllRoles(tx backend.BatchTx) []*authpb.Role { + _, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1) + if len(vs) == 0 { + return nil + } + + var roles []*authpb.Role + + for _, v := range vs { + role := &authpb.Role{} + err := role.Unmarshal(v) + if err != nil { + plog.Panicf("failed to unmarshal role struct: %s", err) + } + + roles = append(roles, role) + } + + return roles +} + +func putRole(tx backend.BatchTx, role *authpb.Role) { + b, err := role.Marshal() + if err != nil { + plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err) + } + + tx.UnsafePut(authRolesBucketName, []byte(role.Name), b) +} + +func delRole(tx backend.BatchTx, rolename string) { + tx.UnsafeDelete(authRolesBucketName, []byte(rolename)) +} + +func (as *authStore) isAuthEnabled() bool { + as.enabledMu.RLock() + defer as.enabledMu.RUnlock() + return as.enabled +} + +func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { + tx := be.BatchTx() + tx.Lock() + + tx.UnsafeCreateBucket(authBucketName) + tx.UnsafeCreateBucket(authUsersBucketName) + tx.UnsafeCreateBucket(authRolesBucketName) + + enabled := false + _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0) + if len(vs) == 1 { + if bytes.Equal(vs[0], authEnabled) { + enabled = true + } + } + + as := &authStore{ + be: be, + revision: getRevision(tx), + enabled: enabled, + rangePermCache: make(map[string]*unifiedRangePermissions), + tokenProvider: tp, + } + + if enabled { + as.tokenProvider.enable() + } + + if as.Revision() == 0 { + as.commitRevision(tx) + } + + tx.Unlock() + be.ForceCommit() + + return as +} + +func hasRootRole(u *authpb.User) bool { + for _, r := range u.Roles { + if r == rootRole { + return true + } + } + return false +} + +func (as *authStore) commitRevision(tx backend.BatchTx) { + atomic.AddUint64(&as.revision, 1) + revBytes := make([]byte, revBytesLen) + binary.BigEndian.PutUint64(revBytes, as.Revision()) + tx.UnsafePut(authBucketName, revisionKey, revBytes) +} + +func getRevision(tx backend.BatchTx) uint64 { + _, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0) + if len(vs) != 1 { + // this can happen in the initialization phase + return 0 + } + + return binary.BigEndian.Uint64(vs[0]) +} + +func (as *authStore) setRevision(rev uint64) { + atomic.StoreUint64(&as.revision, rev) +} + +func (as *authStore) Revision() uint64 { + return atomic.LoadUint64(&as.revision) +} + +func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { + peer, ok := peer.FromContext(ctx) + if !ok || peer == nil || peer.AuthInfo == nil { + return nil + } + + tlsInfo := peer.AuthInfo.(credentials.TLSInfo) + for _, chains := range tlsInfo.State.VerifiedChains { + for _, chain := range chains { + cn := chain.Subject.CommonName + plog.Debugf("found common name %s", cn) + + return &AuthInfo{ + Username: cn, + Revision: 
as.Revision(), + } + } + } + + return nil +} + +func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, nil + } + + ts, tok := md["token"] + if !tok { + return nil, nil + } + + token := ts[0] + authInfo, uok := as.authInfoFromToken(ctx, token) + if !uok { + plog.Warningf("invalid auth token: %s", token) + return nil, ErrInvalidAuthToken + } + return authInfo, nil +} + +func (as *authStore) GenTokenPrefix() (string, error) { + return as.tokenProvider.genTokenPrefix() +} + +func decomposeOpts(optstr string) (string, map[string]string, error) { + opts := strings.Split(optstr, ",") + tokenType := opts[0] + + typeSpecificOpts := make(map[string]string) + for i := 1; i < len(opts); i++ { + pair := strings.Split(opts[i], "=") + + if len(pair) != 2 { + plog.Errorf("invalid token specific option: %s", optstr) + return "", nil, ErrInvalidAuthOpts + } + + if _, ok := typeSpecificOpts[pair[0]]; ok { + plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) + return "", nil, ErrInvalidAuthOpts + } + + typeSpecificOpts[pair[0]] = pair[1] + } + + return tokenType, typeSpecificOpts, nil + +} + +func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { + tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts) + if err != nil { + return nil, ErrInvalidAuthOpts + } + + switch tokenType { + case "simple": + plog.Warningf("simple token is not cryptographically signed") + return newTokenProviderSimple(indexWaiter), nil + case "jwt": + return newTokenProviderJWT(typeSpecificOpts) + default: + plog.Errorf("unknown token type: %s", tokenType) + return nil, ErrInvalidAuthOpts + } +} diff --git a/vendor/github.com/coreos/etcd/client/BUILD b/vendor/github.com/coreos/etcd/client/BUILD new file mode 100644 index 000000000000..00a5b08d87f0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/BUILD @@ -0,0 +1,43 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "auth_role.go", + "auth_user.go", + "cancelreq.go", + "client.go", + "cluster_error.go", + "curl.go", + "discover.go", + "doc.go", + "keys.generated.go", + "keys.go", + "members.go", + "util.go", + ], + importpath = "github.com/coreos/etcd/client", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/pkg/pathutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/srv:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/etcd/version:go_default_library", + "//vendor/github.com/ugorji/go/codec:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/github.com/coreos/etcd/client/README.md new file mode 100644 index 000000000000..c0ec81901638 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/README.md @@ -0,0 +1,117 @@ +# etcd/client + +etcd/client is the Go client library for etcd. 
+ +[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client) + +etcd uses the `cmd/vendor` directory to store external dependencies, which are +compiled into etcd release binaries. `client` can be imported without +vendoring. For full compatibility, it is recommended to vendor builds using +etcd's vendored packages, using tools like godep, as in +[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). +For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). + +## Install + +```bash +go get github.com/coreos/etcd/client +``` + +## Usage + +```go +package main + +import ( + "log" + "time" + + "golang.org/x/net/context" + "github.com/coreos/etcd/client" +) + +func main() { + cfg := client.Config{ + Endpoints: []string{"http://127.0.0.1:2379"}, + Transport: client.DefaultTransport, + // set a timeout per request to fail fast when the target endpoint is unavailable + HeaderTimeoutPerRequest: time.Second, + } + c, err := client.New(cfg) + if err != nil { + log.Fatal(err) + } + kapi := client.NewKeysAPI(c) + // set "/foo" key with "bar" value + log.Print("Setting '/foo' key with 'bar' value") + resp, err := kapi.Set(context.Background(), "/foo", "bar", nil) + if err != nil { + log.Fatal(err) + } else { + // print common key info + log.Printf("Set is done. Metadata is %q\n", resp) + } + // get "/foo" key's value + log.Print("Getting '/foo' key value") + resp, err = kapi.Get(context.Background(), "/foo", nil) + if err != nil { + log.Fatal(err) + } else { + // print common key info + log.Printf("Get is done. Metadata is %q\n", resp) + // print value + log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value) + } +} +``` + +## Error Handling + +The etcd client may return three types of errors. + +- context error + +Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned, no matter what internal errors the API call has already encountered. + +- cluster error + +Each API call tries to send its request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or to connection issues, the error is added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned. + +- response error + +If the response received from the cluster is invalid, a plain string error will be returned; for example, an invalid JSON error. + +Here is example code for handling client errors: + +```go +cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}} +c, err := client.New(cfg) +if err != nil { + log.Fatal(err) +} + +kapi := client.NewKeysAPI(c) +resp, err := kapi.Set(ctx, "test", "bar", nil) +if err != nil { + if err == context.Canceled { + // ctx was canceled by another routine + } else if err == context.DeadlineExceeded { + // ctx had a deadline attached and the deadline was exceeded + } else if cerr, ok := err.(*client.ClusterError); ok { + // process (cerr.Errors) + } else { + // bad cluster endpoints, which are not etcd servers + } +} +``` + + +## Caveat + +1. etcd/client prefers to use the same endpoint as long as that endpoint continues to work well. This saves socket resources and improves efficiency for both the client and the server.
This preference doesn't remove consistency from the data consumed by the client, because data replicated to each etcd member has already passed through the consensus process. + +2. etcd/client does round-robin rotation over the other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all of the errors it encountered. + +3. By default, etcd/client cannot currently handle the case where the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help in this scenario because the operating system may still send TCP keepalive packets. Over time we'd like to improve this functionality, but solving this issue isn't a high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention. + +4. etcd/client cannot detect whether a member is healthy using watches or non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information. diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go new file mode 100644 index 000000000000..d15e00dd7160 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/auth_role.go @@ -0,0 +1,237 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "encoding/json" + "net/http" + "net/url" + + "golang.org/x/net/context" +) + +type Role struct { + Role string `json:"role"` + Permissions Permissions `json:"permissions"` + Grant *Permissions `json:"grant,omitempty"` + Revoke *Permissions `json:"revoke,omitempty"` +} + +type Permissions struct { + KV rwPermission `json:"kv"` +} + +type rwPermission struct { + Read []string `json:"read"` + Write []string `json:"write"` +} + +type PermissionType int + +const ( + ReadPermission PermissionType = iota + WritePermission + ReadWritePermission +) + +// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to +// interact with etcd's role creation and modification features. +func NewAuthRoleAPI(c Client) AuthRoleAPI { + return &httpAuthRoleAPI{ + client: c, + } +} + +type AuthRoleAPI interface { + // AddRole adds a role. + AddRole(ctx context.Context, role string) error + + // RemoveRole removes a role. + RemoveRole(ctx context.Context, role string) error + + // GetRole retrieves role details. + GetRole(ctx context.Context, role string) (*Role, error) + + // GrantRoleKV grants a role some permission prefixes for the KV store.
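+ // permType selects read, write, or read-write access to the given prefixes.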
+ GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // RevokeRoleKV revokes some permission prefixes for a role on the KV store. + RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // ListRoles lists roles. + ListRoles(ctx context.Context) ([]string, error) +} + +type httpAuthRoleAPI struct { + client httpClient +} + +type authRoleAPIAction struct { + verb string + name string + role *Role +} + +type authRoleAPIList struct{} + +func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", l.name) + if l.role == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.role) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { + resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + var roleList struct { + Roles []Role `json:"roles"` + } + if err = json.Unmarshal(body, &roleList); err != nil { + return nil, err + } + ret := make([]string, 0, len(roleList.Roles)) + for _, r := range roleList.Roles { + ret = append(ret, r.Role) + } + return ret, nil +} + +func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { + role := &Role{ + Role: rolename, + } + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "DELETE", + name: rolename, + }) +} + +func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { + resp, body, err := r.client.Do(ctx, req) + if err != nil { + return err + } + if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err := json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { + return r.modRole(ctx, &authRoleAPIAction{ + verb: "GET", + name: rolename, + }) +} + +func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { + var out rwPermission + switch permType { + case ReadPermission: + out.Read = prefixes + case WritePermission: + out.Write = prefixes + case ReadWritePermission: + out.Read = prefixes + out.Write = prefixes + } + return out +} + +func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Grant: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := 
buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Revoke: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { + resp, body, err := r.client.Do(ctx, req) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var role Role + if err = json.Unmarshal(body, &role); err != nil { + return nil, err + } + return &role, nil +} diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go new file mode 100644 index 000000000000..97c3f31813b5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/auth_user.go @@ -0,0 +1,320 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "encoding/json" + "net/http" + "net/url" + "path" + + "golang.org/x/net/context" +) + +var ( + defaultV2AuthPrefix = "/v2/auth" +) + +type User struct { + User string `json:"user"` + Password string `json:"password,omitempty"` + Roles []string `json:"roles"` + Grant []string `json:"grant,omitempty"` + Revoke []string `json:"revoke,omitempty"` +} + +// userListEntry is the user representation given by the server for ListUsers +type userListEntry struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +type UserRoles struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +func v2AuthURL(ep url.URL, action string, name string) *url.URL { + if name != "" { + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) + return &ep + } + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) + return &ep +} + +// NewAuthAPI constructs a new AuthAPI that uses HTTP to +// interact with etcd's general auth features. +func NewAuthAPI(c Client) AuthAPI { + return &httpAuthAPI{ + client: c, + } +} + +type AuthAPI interface { + // Enable auth. + Enable(ctx context.Context) error + + // Disable auth. 
+ Disable(ctx context.Context) error +} + +type httpAuthAPI struct { + client httpClient +} + +func (s *httpAuthAPI) Enable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"PUT"}) +} + +func (s *httpAuthAPI) Disable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"DELETE"}) +} + +func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { + resp, body, err := s.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +type authAPIAction struct { + verb string +} + +func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "enable", "") + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req +} + +type authError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e authError) Error() string { + return e.Message +} + +// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to +// interact with etcd's user creation and modification features. +func NewAuthUserAPI(c Client) AuthUserAPI { + return &httpAuthUserAPI{ + client: c, + } +} + +type AuthUserAPI interface { + // AddUser adds a user. + AddUser(ctx context.Context, username string, password string) error + + // RemoveUser removes a user. + RemoveUser(ctx context.Context, username string) error + + // GetUser retrieves user details. + GetUser(ctx context.Context, username string) (*User, error) + + // GrantUser grants a user some permission roles. + GrantUser(ctx context.Context, username string, roles []string) (*User, error) + + // RevokeUser revokes some permission roles from a user. + RevokeUser(ctx context.Context, username string, roles []string) (*User, error) + + // ChangePassword changes the user's password. + ChangePassword(ctx context.Context, username string, password string) (*User, error) + + // ListUsers lists the users. 
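+ // Only user names are returned; use GetUser to retrieve a user's roles.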
+ ListUsers(ctx context.Context) ([]string, error) +} + +type httpAuthUserAPI struct { + client httpClient +} + +type authUserAPIAction struct { + verb string + username string + user *User +} + +type authUserAPIList struct{} + +func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", l.username) + if l.user == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.user) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { + resp, body, err := u.client.Do(ctx, &authUserAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + + var userList struct { + Users []userListEntry `json:"users"` + } + + if err = json.Unmarshal(body, &userList); err != nil { + return nil, err + } + + ret := make([]string, 0, len(userList.Users)) + for _, u := range userList.Users { + ret = append(ret, u.User) + } + return ret, nil +} + +func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { + user := &User{ + User: username, + Password: password, + } + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "DELETE", + username: username, + }) +} + +func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { + return u.modUser(ctx, &authUserAPIAction{ + verb: "GET", + username: username, + }) +} + +func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Grant: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Revoke: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { + user := &User{ + User: username, + Password: password, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return nil, 
err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var user User + if err = json.Unmarshal(body, &user); err != nil { + var userR UserRoles + if urerr := json.Unmarshal(body, &userR); urerr != nil { + return nil, err + } + user.User = userR.User + for _, r := range userR.Roles { + user.Roles = append(user.Roles, r.Role) + } + } + return &user, nil +} diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go new file mode 100644 index 000000000000..76d1f040198b --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cancelreq.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// borrowed from golang/net/context/ctxhttp/cancelreq.go + +package client + +import "net/http" + +func requestCanceler(tr CancelableTransport, req *http.Request) func() { + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go new file mode 100644 index 000000000000..19ce2ec01dad --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -0,0 +1,704 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "net/url" + "sort" + "strconv" + "sync" + "time" + + "github.com/coreos/etcd/version" + + "golang.org/x/net/context" +) + +var ( + ErrNoEndpoints = errors.New("client: no endpoints available") + ErrTooManyRedirects = errors.New("client: too many redirects") + ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") + ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") + errTooManyRedirectChecks = errors.New("client: too many redirect checks") + + // oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so + // that Do() will not retry a request + oneShotCtxValue interface{} +) + +var DefaultRequestTimeout = 5 * time.Second + +var DefaultTransport CancelableTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +type EndpointSelectionMode int + +const ( + // EndpointSelectionRandom is the default value of the 'SelectionMode'. + // As the name implies, the client object will pick a node from the members + // of the cluster in a random fashion. If the cluster has three members, A, B, + // and C, the client picks any node from its three members as its request + // destination.
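+ // (Endpoints are shuffled once in SetEndpoints and the first one is pinned; + // the pin advances only after a request to the pinned endpoint fails.)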
+ EndpointSelectionRandom EndpointSelectionMode = iota + + // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', + // requests are sent directly to the cluster leader. This reduces + // forwarding roundtrips compared to making requests to etcd followers + // who then forward them to the cluster leader. In the event of a leader + // failure, however, clients configured this way cannot prioritize among + // the remaining etcd followers. Therefore, when a client sets 'SelectionMode' + // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to + // maintain its knowledge of the current cluster state. + // + // This mode should be used with Client.AutoSync(). + EndpointSelectionPrioritizeLeader +) + +type Config struct { + // Endpoints defines a set of URLs (schemes, hosts and ports only) + // that can be used to communicate with a logical etcd cluster. For + // example, a three-node cluster could be provided like so: + // + // Endpoints: []string{ + // "http://node1.example.com:2379", + // "http://node2.example.com:2379", + // "http://node3.example.com:2379", + // } + // + // If multiple endpoints are provided, the Client will attempt to + // use them all in the event that one or more of them are unusable. + // + // If Client.Sync is ever called, the Client may cache an alternate + // set of endpoints to continue operation. + Endpoints []string + + // Transport is used by the Client to drive HTTP requests. If not + // provided, DefaultTransport will be used. + Transport CancelableTransport + + // CheckRedirect specifies the policy for handling HTTP redirects. + // If CheckRedirect is not nil, the Client calls it before + // following an HTTP redirect. The sole argument is the number of + // requests that have already been made. If CheckRedirect returns + // an error, Client.Do will not make any further requests and return + // the error back to the caller. + // + // If CheckRedirect is nil, the Client uses its default policy, + // which is to stop after 10 consecutive requests. + CheckRedirect CheckRedirectFunc + + // Username specifies the user credential to add as an authorization header + // to the request. + Username string + + // Password is the password for the specified user to add as an authorization header + // to the request. + Password string + + // HeaderTimeoutPerRequest specifies the time limit to wait for a response + // header in a single request made by the Client. The timeout includes + // connection time, any redirects, and header wait time. + // + // For a non-watch GET request, the server returns the response body immediately. + // For PUT/POST/DELETE requests, the server will attempt to commit the request + // before responding, which is expected to take `100ms + 2 * RTT`. + // For a watch request, the server returns the header immediately to notify the + // Client that the watch has started. But if the server is behind some kind of + // proxy, the response header may be cached at the proxy, and the Client cannot + // rely on this behavior. + // + // In particular, a wait request will ignore this timeout. + // + // One API call may send multiple requests to different etcd servers until it + // succeeds. Use the context of the API call to specify the overall timeout. + // + // A HeaderTimeoutPerRequest of zero means no timeout. + HeaderTimeoutPerRequest time.Duration + + // SelectionMode is an EndpointSelectionMode enum that specifies the + // policy for choosing the etcd cluster node to which requests are sent.
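+ // The zero value is EndpointSelectionRandom.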
SelectionMode EndpointSelectionMode +} + +func (cfg *Config) transport() CancelableTransport { + if cfg.Transport == nil { + return DefaultTransport + } + return cfg.Transport +} + +func (cfg *Config) checkRedirect() CheckRedirectFunc { + if cfg.CheckRedirect == nil { + return DefaultCheckRedirect + } + return cfg.CheckRedirect +} + +// CancelableTransport mimics net/http.Transport, but requires that +// the object also support request cancellation. +type CancelableTransport interface { + http.RoundTripper + CancelRequest(req *http.Request) +} + +type CheckRedirectFunc func(via int) error + +// DefaultCheckRedirect follows up to 10 redirects, but no more. +var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { + if via > 10 { + return ErrTooManyRedirects + } + return nil +} + +type Client interface { + // Sync updates the internal cache of the etcd cluster's membership. + Sync(context.Context) error + + // AutoSync periodically calls Sync() at the given interval. + // The recommended sync interval is 10 seconds to 1 minute; this does + // not put too much overhead on the server and lets the client catch up + // with cluster changes in time. + // + // Example usage: + // + // for { + // err := client.AutoSync(ctx, 10*time.Second) + // if err == context.DeadlineExceeded || err == context.Canceled { + // break + // } + // log.Print(err) + // } + AutoSync(context.Context, time.Duration) error + + // Endpoints returns a copy of the current set of API endpoints used + // by Client to resolve HTTP requests. If Sync has ever been called, + // this may differ from the initial Endpoints provided in the Config. + Endpoints() []string + + // SetEndpoints sets the set of API endpoints used by Client to resolve + // HTTP requests. If the given endpoints are not valid, an error will be + // returned. + SetEndpoints(eps []string) error + + // GetVersion retrieves the current etcd server and cluster version. + GetVersion(ctx context.Context) (*version.Versions, error) + + httpClient +} + +func New(cfg Config) (Client, error) { + c := &httpClusterClient{ + clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), + rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + selectionMode: cfg.SelectionMode, + } + if cfg.Username != "" { + c.credentials = &credentials{ + username: cfg.Username, + password: cfg.Password, + } + } + if err := c.SetEndpoints(cfg.Endpoints); err != nil { + return nil, err + } + return c, nil +} + +type httpClient interface { + Do(context.Context, httpAction) (*http.Response, []byte, error) +} + +func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { + return func(ep url.URL) httpClient { + return &redirectFollowingHTTPClient{ + checkRedirect: cr, + client: &simpleHTTPClient{ + transport: tr, + endpoint: ep, + headerTimeout: headerTimeout, + }, + } + } +} + +type credentials struct { + username string + password string +} + +type httpClientFactory func(url.URL) httpClient + +type httpAction interface { + HTTPRequest(url.URL) *http.Request +} + +type httpClusterClient struct { + clientFactory httpClientFactory + endpoints []url.URL + pinned int + credentials *credentials + sync.RWMutex + rand *rand.Rand + selectionMode EndpointSelectionMode +} + +func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) { + ceps := make([]url.URL, len(eps)) + copy(ceps, eps) + + // To perform a lookup on the new endpoint list
without using the current + // client, we'll copy it. + clientCopy := &httpClusterClient{ + clientFactory: c.clientFactory, + credentials: c.credentials, + rand: c.rand, + + pinned: 0, + endpoints: ceps, + } + + mAPI := NewMembersAPI(clientCopy) + leader, err := mAPI.Leader(ctx) + if err != nil { + return "", err + } + if len(leader.ClientURLs) == 0 { + return "", ErrNoLeaderEndpoint + } + + return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? +} + +func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) { + if len(eps) == 0 { + return []url.URL{}, ErrNoEndpoints + } + + neps := make([]url.URL, len(eps)) + for i, ep := range eps { + u, err := url.Parse(ep) + if err != nil { + return []url.URL{}, err + } + neps[i] = *u + } + return neps, nil +} + +func (c *httpClusterClient) SetEndpoints(eps []string) error { + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + c.Lock() + defer c.Unlock() + + c.endpoints = shuffleEndpoints(c.rand, neps) + // We're not doing anything for PrioritizeLeader here. This is + // because we don't have a context, so we can't call getLeaderEndpoint. + // However, if you're using PrioritizeLeader, you've already been told + // to call Sync regularly, where we do have a ctx and can determine the + // leader. PrioritizeLeader is also quite a loose guarantee, so deal + // with it. + c.pinned = 0 + + return nil +} + +func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + action := act + c.RLock() + leps := len(c.endpoints) + eps := make([]url.URL, leps) + n := copy(eps, c.endpoints) + pinned := c.pinned + + if c.credentials != nil { + action = &authedAction{ + act: act, + credentials: *c.credentials, + } + } + c.RUnlock() + + if leps == 0 { + return nil, nil, ErrNoEndpoints + } + + if leps != n { + return nil, nil, errors.New("unable to pick endpoint: copy failed") + } + + var resp *http.Response + var body []byte + var err error + cerr := &ClusterError{} + isOneShot := ctx.Value(&oneShotCtxValue) != nil + + for i := pinned; i < leps+pinned; i++ { + k := i % leps + hc := c.clientFactory(eps[k]) + resp, body, err = hc.Do(ctx, action) + if err != nil { + cerr.Errors = append(cerr.Errors, err) + if err == ctx.Err() { + return nil, nil, ctx.Err() + } + if err == context.Canceled || err == context.DeadlineExceeded { + return nil, nil, err + } + } else if resp.StatusCode/100 == 5 { + switch resp.StatusCode { + case http.StatusInternalServerError, http.StatusServiceUnavailable: + // TODO: make sure this is a no leader response + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) + default: + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) + } + err = cerr.Errors[0] + } + if err != nil { + if !isOneShot { + continue + } + c.Lock() + c.pinned = (k + 1) % leps + c.Unlock() + return nil, nil, err + } + if k != pinned { + c.Lock() + c.pinned = k + c.Unlock() + } + return resp, body, nil + } + + return nil, nil, cerr +} + +func (c *httpClusterClient) Endpoints() []string { + c.RLock() + defer c.RUnlock() + + eps := make([]string, len(c.endpoints)) + for i, ep := range c.endpoints { + eps[i] = ep.String() + } + + return eps +} + +func (c *httpClusterClient) Sync(ctx context.Context) error { + mAPI := NewMembersAPI(c) + ms, err := mAPI.List(ctx) + if err != nil { + return err + } + + var eps []string + for _, m := range ms {
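+ // collect each member's advertised client URLs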
eps = append(eps, m.ClientURLs...) + } + + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + npin := 0 + + switch c.selectionMode { + case EndpointSelectionRandom: + c.RLock() + eq := endpointsEqual(c.endpoints, neps) + c.RUnlock() + + if eq { + return nil + } + // When the items in the endpoint list change, we choose a new pin + neps = shuffleEndpoints(c.rand, neps) + case EndpointSelectionPrioritizeLeader: + nle, err := c.getLeaderEndpoint(ctx, neps) + if err != nil { + return ErrNoLeaderEndpoint + } + + for i, n := range neps { + if n.String() == nle { + npin = i + break + } + } + default: + return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) + } + + c.Lock() + defer c.Unlock() + c.endpoints = neps + c.pinned = npin + + return nil +} + +func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + err := c.Sync(ctx) + if err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} + +func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { + act := &getAction{Prefix: "/version"} + + resp, body, err := c.Do(ctx, act) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + if len(body) == 0 { + return nil, ErrEmptyBody + } + var vresp version.Versions + if err := json.Unmarshal(body, &vresp); err != nil { + return nil, ErrInvalidJSON + } + return &vresp, nil + default: + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return nil, ErrInvalidJSON + } + return nil, etcdErr + } +} + +type roundTripResponse struct { + resp *http.Response + err error +} + +type simpleHTTPClient struct { + transport CancelableTransport + endpoint url.URL + headerTimeout time.Duration +} + +func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + req := act.HTTPRequest(c.endpoint) + + if err := printcURL(req); err != nil { + return nil, nil, err + } + + isWait := false + if req != nil && req.URL != nil { + ws := req.URL.Query().Get("wait") + if len(ws) != 0 { + var err error + isWait, err = strconv.ParseBool(ws) + if err != nil { + return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) + } + } + } + + var hctx context.Context + var hcancel context.CancelFunc + if !isWait && c.headerTimeout > 0 { + hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) + } else { + hctx, hcancel = context.WithCancel(ctx) + } + defer hcancel() + + reqcancel := requestCanceler(c.transport, req) + + rtchan := make(chan roundTripResponse, 1) + go func() { + resp, err := c.transport.RoundTrip(req) + rtchan <- roundTripResponse{resp: resp, err: err} + close(rtchan) + }() + + var resp *http.Response + var err error + + select { + case rtresp := <-rtchan: + resp, err = rtresp.resp, rtresp.err + case <-hctx.Done(): + // cancel and wait for request to actually exit before continuing + reqcancel() + rtresp := <-rtchan + resp = rtresp.resp + switch { + case ctx.Err() != nil: + err = ctx.Err() + case hctx.Err() != nil: + err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) + default: + panic("failed to get error from context") + } + } + + // always check for resp nil-ness to deal with possible + // race conditions between channels above + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var
body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + done <- struct{}{} + }() + + select { + case <-ctx.Done(): + resp.Body.Close() + <-done + return nil, nil, ctx.Err() + case <-done: + } + + return resp, body, err +} + +type authedAction struct { + act httpAction + credentials credentials +} + +func (a *authedAction) HTTPRequest(url url.URL) *http.Request { + r := a.act.HTTPRequest(url) + r.SetBasicAuth(a.credentials.username, a.credentials.password) + return r +} + +type redirectFollowingHTTPClient struct { + client httpClient + checkRedirect CheckRedirectFunc +} + +func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + next := act + for i := 0; i < 100; i++ { + if i > 0 { + if err := r.checkRedirect(i); err != nil { + return nil, nil, err + } + } + resp, body, err := r.client.Do(ctx, next) + if err != nil { + return nil, nil, err + } + if resp.StatusCode/100 == 3 { + hdr := resp.Header.Get("Location") + if hdr == "" { + return nil, nil, fmt.Errorf("Location header not set") + } + loc, err := url.Parse(hdr) + if err != nil { + return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) + } + next = &redirectedHTTPAction{ + action: act, + location: *loc, + } + continue + } + return resp, body, nil + } + + return nil, nil, errTooManyRedirectChecks +} + +type redirectedHTTPAction struct { + action httpAction + location url.URL +} + +func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { + orig := r.action.HTTPRequest(ep) + orig.URL = &r.location + return orig +} + +func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { + p := r.Perm(len(eps)) + neps := make([]url.URL, len(eps)) + for i, k := range p { + neps[i] = eps[k] + } + return neps +} + +func endpointsEqual(left, right []url.URL) bool { + if len(left) != len(right) { + return false + } + + sLeft := make([]string, len(left)) + sRight := make([]string, len(right)) + for i, l := range left { + sLeft[i] = l.String() + } + for i, r := range right { + sRight[i] = r.String() + } + + sort.Strings(sLeft) + sort.Strings(sRight) + for i := range sLeft { + if sLeft[i] != sRight[i] { + return false + } + } + return true +} diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go new file mode 100644 index 000000000000..34618cdbd9e6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cluster_error.go @@ -0,0 +1,37 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
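The cluster client shown above walks its shuffled endpoint list starting from the pinned index and only gives up once every endpoint has been tried; at that point the per-endpoint failures come back aggregated in the *ClusterError type this file defines. A minimal sketch of unpacking that aggregate on the caller side, assuming a client already built with client.New (the package name, helper name, and canonical non-vendored import paths are illustrative):

```go
package example

import (
	"fmt"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

// listMembers shows how a caller can tell an all-endpoints-down failure
// apart from an ordinary error.
func listMembers(c client.Client) {
	ms, err := client.NewMembersAPI(c).List(context.Background())
	if err != nil {
		// When every endpoint fails, Do returns the aggregated *ClusterError.
		if cerr, ok := err.(*client.ClusterError); ok {
			fmt.Print(cerr.Detail()) // one line per failed endpoint attempt
		}
		return
	}
	for _, m := range ms {
		fmt.Println(m.Name, m.ClientURLs)
	}
}
```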
+ +package client + +import "fmt" + +type ClusterError struct { + Errors []error +} + +func (ce *ClusterError) Error() string { + s := ErrClusterUnavailable.Error() + for i, e := range ce.Errors { + s += fmt.Sprintf("; error #%d: %s\n", i, e) + } + return s +} + +func (ce *ClusterError) Detail() string { + s := "" + for i, e := range ce.Errors { + s += fmt.Sprintf("error #%d: %s\n", i, e) + } + return s +} diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go new file mode 100644 index 000000000000..c8bc9fba20ec --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/curl.go @@ -0,0 +1,70 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "os" +) + +var ( + cURLDebug = false +) + +func EnablecURLDebug() { + cURLDebug = true +} + +func DisablecURLDebug() { + cURLDebug = false +} + +// printcURL prints the cURL equivalent request to stderr. +// It returns an error if the body of the request cannot +// be read. +// The caller MUST cancel the request if there is an error. +func printcURL(req *http.Request) error { + if !cURLDebug { + return nil + } + var ( + command string + b []byte + err error + ) + + if req.URL != nil { + command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) + } + + if req.Body != nil { + b, err = ioutil.ReadAll(req.Body) + if err != nil { + return err + } + command += fmt.Sprintf(" -d %q", string(b)) + } + + fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) + + // reset body + body := bytes.NewBuffer(b) + req.Body = ioutil.NopCloser(body) + + return nil +} diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go new file mode 100644 index 000000000000..442e35fe543b --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/discover.go @@ -0,0 +1,40 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "github.com/coreos/etcd/pkg/srv" +) + +// Discoverer is an interface that wraps the Discover method. +type Discoverer interface { + // Discover looks up the etcd servers for the domain. + Discover(domain string) ([]string, error) +} + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. 
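+// Lookups go through github.com/coreos/etcd/pkg/srv, which resolves the
+// conventional etcd SRV names for the domain (e.g. _etcd-client._tcp.<domain>).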
+func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +func (d *srvDiscover) Discover(domain string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain) + if err != nil { + return nil, err + } + return srvs.Endpoints, nil +} diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go new file mode 100644 index 000000000000..32fdfb52c670 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/doc.go @@ -0,0 +1,73 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package client provides bindings for the etcd APIs. + +Create a Config and exchange it for a Client: + + import ( + "net/http" + + "github.com/coreos/etcd/client" + "golang.org/x/net/context" + ) + + cfg := client.Config{ + Endpoints: []string{"http://127.0.0.1:2379"}, + Transport: DefaultTransport, + } + + c, err := client.New(cfg) + if err != nil { + // handle error + } + +Clients are safe for concurrent use by multiple goroutines. + +Create a KeysAPI using the Client, then use it to interact with etcd: + + kAPI := client.NewKeysAPI(c) + + // create a new key /foo with the value "bar" + _, err = kAPI.Create(context.Background(), "/foo", "bar") + if err != nil { + // handle error + } + + // delete the newly created key only if the value is still "bar" + _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) + if err != nil { + // handle error + } + +Use a custom context to set timeouts on your operations: + + import "time" + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // set a new key, ignoring it's previous state + _, err := kAPI.Set(ctx, "/ping", "pong", nil) + if err != nil { + if err == context.DeadlineExceeded { + // request took longer than 5s + } else { + // handle error + } + } + +*/ +package client diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go new file mode 100644 index 000000000000..216139c9cc71 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys.generated.go @@ -0,0 +1,1087 @@ +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package client + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81819 = 1 + codecSelferC_RAW1819 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1819 = 10 + codecSelferValueTypeMap1819 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1819 = 2 + codecSelfer_containerMapValue1819 = 3 + codecSelfer_containerMapEnd1819 = 4 + codecSelfer_containerArrayElem1819 = 6 + codecSelfer_containerArrayEnd1819 = 7 +) + +var ( + codecSelferBitsize1819 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1819 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1819 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 time.Time + _ = v0 + } +} + +func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Action)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("action")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Action)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("node")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("prevNode")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1819) + } + } + } +} + +func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, 
_, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1819 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1819) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1819 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) + } + } +} + +func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1819) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1819) + switch yys3 { + case "action": + if r.TryDecodeAsNil() { + x.Action = "" + } else { + yyv4 := &x.Action + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "node": + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + case "prevNode": + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1819) +} + +func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Action = "" + } else { + yyv9 := &x.Action + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + 
z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) +} + +func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Dir != false + yyq2[6] = x.Expiration != nil + yyq2[7] = x.TTL != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("dir")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Value)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81819, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if x.Nodes == nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("nodes")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.Nodes == nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("createdIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("modifiedIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyq2[6] { + if x.Expiration == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { + r.EncodeBuiltin(yym23, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym22 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("expiration")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + if x.Expiration == nil { + r.EncodeNil() + } else { + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { + r.EncodeBuiltin(yym25, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym24 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyq2[7] { + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1819) + r.EncodeString(codecSelferC_UTF81819, string("ttl")) + z.EncSendContainerState(codecSelfer_containerMapValue1819) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1819) + } + } + } +} + +func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1819 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1819) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1819 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) + } + } +} + +func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1819) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1819) + switch 
yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv6 := &x.Dir + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "nodes": + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv10 := &x.Nodes + yyv10.CodecDecodeSelf(d) + } + case "createdIndex": + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + yyv11 := &x.CreatedIndex + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) + } + } + case "modifiedIndex": + if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + yyv13 := &x.ModifiedIndex + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) + } + } + case "expiration": + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 { + r.DecodeBuiltin(yym17, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym16 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + case "ttl": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv18 := &x.TTL + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int64)(yyv18)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1819) +} + +func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv21 := &x.Key + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv23 := &x.Dir + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv25 := &x.Value + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = 
yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv27 := &x.Nodes + yyv27.CodecDecodeSelf(d) + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + yyv28 := &x.CreatedIndex + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*uint64)(yyv28)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + yyv30 := &x.ModifiedIndex + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym33 := z.DecBinary() + _ = yym33 + if false { + } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 { + r.DecodeBuiltin(yym34, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym33 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym33 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv35 := &x.TTL + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1819) + z.DecStructFieldNotFound(yyj20-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1819) +} + +func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encNodes((Nodes)(x), e) + } + } +} + +func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decNodes((*Nodes)(x), d) + } +} + +func (x codecSelfer1819) encNodes(v Nodes, e *codec1978.Encoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1819) + if yyv1 == 
nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1819) +} + +func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { + var h codecSelfer1819 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*Node{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*Node, yyrl1) + } + } else { + yyv1 = make([]*Node, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = Node{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(Node) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = Node{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(Node) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *Node + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = Node{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(Node) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*Node{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go new file mode 100644 index 000000000000..4a6c41a78bc8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys.go @@ -0,0 +1,682 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
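Everything in keys.generated.go above was emitted by codecgen (note the //go:generate directive at the top of keys.go, next); at runtime the client drives it through the ugorji codec JSON handle. A minimal sketch of that decode path in isolation, with an illustrative response body and canonical (non-vendored) import paths:

```go
package example

import (
	"fmt"

	"github.com/coreos/etcd/client"
	"github.com/ugorji/go/codec"
)

func decodeKeysResponse() error {
	body := []byte(`{"action":"get","node":{"key":"/foo","value":"bar","createdIndex":7,"modifiedIndex":7}}`)

	// Same decode path the client uses internally: the JSON handle
	// dispatches into the generated CodecDecodeSelf methods.
	var res client.Response
	if err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res); err != nil {
		return err
	}
	fmt.Println(res.Action, res.Node.Key, res.Node.Value)
	return nil
}
```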
+ +package client + +//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/coreos/etcd/pkg/pathutil" + "github.com/ugorji/go/codec" + "golang.org/x/net/context" +) + +const ( + ErrorCodeKeyNotFound = 100 + ErrorCodeTestFailed = 101 + ErrorCodeNotFile = 102 + ErrorCodeNotDir = 104 + ErrorCodeNodeExist = 105 + ErrorCodeRootROnly = 107 + ErrorCodeDirNotEmpty = 108 + ErrorCodeUnauthorized = 110 + + ErrorCodePrevValueRequired = 201 + ErrorCodeTTLNaN = 202 + ErrorCodeIndexNaN = 203 + ErrorCodeInvalidField = 209 + ErrorCodeInvalidForm = 210 + + ErrorCodeRaftInternal = 300 + ErrorCodeLeaderElect = 301 + + ErrorCodeWatcherCleared = 400 + ErrorCodeEventIndexCleared = 401 +) + +type Error struct { + Code int `json:"errorCode"` + Message string `json:"message"` + Cause string `json:"cause"` + Index uint64 `json:"index"` +} + +func (e Error) Error() string { + return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) +} + +var ( + ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") + ErrEmptyBody = errors.New("client: response body is empty") +) + +// PrevExistType is used to define an existence condition when setting +// or deleting Nodes. +type PrevExistType string + +const ( + PrevIgnore = PrevExistType("") + PrevExist = PrevExistType("true") + PrevNoExist = PrevExistType("false") +) + +var ( + defaultV2KeysPrefix = "/v2/keys" +) + +// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value +// API over HTTP. +func NewKeysAPI(c Client) KeysAPI { + return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) +} + +// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller +// to provide a custom base URL path. This should only be used in +// very rare cases. +func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { + return &httpKeysAPI{ + client: c, + prefix: p, + } +} + +type KeysAPI interface { + // Get retrieves a set of Nodes from etcd + Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) + + // Set assigns a new value to a Node identified by a given key. The caller + // may define a set of conditions in the SetOptions. If SetOptions.Dir=true + // then value is ignored. + Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) + + // Delete removes a Node identified by the given key, optionally destroying + // all of its children as well. The caller may define a set of required + // conditions in an DeleteOptions object. + Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) + + // Create is an alias for Set w/ PrevExist=false + Create(ctx context.Context, key, value string) (*Response, error) + + // CreateInOrder is used to atomically create in-order keys within the given directory. + CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) + + // Update is an alias for Set w/ PrevExist=true + Update(ctx context.Context, key, value string) (*Response, error) + + // Watcher builds a new Watcher targeted at a specific Node identified + // by the given key. The Watcher may be configured at creation time + // through a WatcherOptions object. The returned Watcher is designed + // to emit events that happen to a Node, and optionally to its children. 
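+ // A typical consumer builds the Watcher once, then calls Next in a
+ // loop; the httpWatcher implementation further below advances its
+ // wait index after each event so no event is observed twice.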
+ Watcher(key string, opts *WatcherOptions) Watcher +} + +type WatcherOptions struct { + // AfterIndex defines the index after-which the Watcher should + // start emitting events. For example, if a value of 5 is + // provided, the first event will have an index >= 6. + // + // Setting AfterIndex to 0 (default) means that the Watcher + // should start watching for events starting at the current + // index, whatever that may be. + AfterIndex uint64 + + // Recursive specifies whether or not the Watcher should emit + // events that occur in children of the given keyspace. If set + // to false (default), events will be limited to those that + // occur for the exact key. + Recursive bool +} + +type CreateInOrderOptions struct { + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration +} + +type SetOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Set operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string. + // + // PrevValue is ignored if Dir=true + PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Set operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // PrevExist specifies whether the Node must currently exist + // (PrevExist) or not (PrevNoExist). If the caller does not + // care about existence, set PrevExist to PrevIgnore, or simply + // leave it unset. + PrevExist PrevExistType + + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration + + // Refresh set to true means a TTL value can be updated + // without firing a watch or changing the node value. A + // value must not be provided when refreshing a key. + Refresh bool + + // Dir specifies whether or not this Node should be created as a directory. + Dir bool + + // NoValueOnSuccess specifies whether the response contains the current value of the Node. + // If set, the response will only contain the current value when the request fails. + NoValueOnSuccess bool +} + +type GetOptions struct { + // Recursive defines whether or not all children of the Node + // should be returned. + Recursive bool + + // Sort instructs the server whether or not to sort the Nodes. + // If true, the Nodes are sorted alphabetically by key in + // ascending order (A to z). If false (default), the Nodes will + // not be sorted and the ordering used should not be considered + // predictable. + Sort bool + + // Quorum specifies whether it gets the latest committed value that + // has been applied in quorum of members, which ensures external + // consistency (or linearizability). + Quorum bool +} + +type DeleteOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Delete operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string. 
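+ // A non-empty PrevValue turns the delete into a compare-and-delete,
+ // which fails (ErrorCodeTestFailed) when the stored value differs.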
+ PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Delete operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // Recursive defines whether or not all children of the Node + // should be deleted. If set to true, all children of the Node + // identified by the given key will be deleted. If left unset + // or explicitly set to false, only a single Node will be + // deleted. + Recursive bool + + // Dir specifies whether or not this Node should be removed as a directory. + Dir bool +} + +type Watcher interface { + // Next blocks until an etcd event occurs, then returns a Response + // representing that event. The behavior of Next depends on the + // WatcherOptions used to construct the Watcher. Next is designed to + // be called repeatedly, each time blocking until a subsequent event + // is available. + // + // If the provided context is cancelled, Next will return a non-nil + // error. Any other failures encountered while waiting for the next + // event (connection issues, deserialization failures, etc) will + // also result in a non-nil error. + Next(context.Context) (*Response, error) +} + +type Response struct { + // Action is the name of the operation that occurred. Possible values + // include get, set, delete, update, create, compareAndSwap, + // compareAndDelete and expire. + Action string `json:"action"` + + // Node represents the state of the relevant etcd Node. + Node *Node `json:"node"` + + // PrevNode represents the previous state of the Node. PrevNode is non-nil + // only if the Node existed before the action occurred and the action + // caused a change to the Node. + PrevNode *Node `json:"prevNode"` + + // Index holds the cluster-level index at the time the Response was generated. + // This index is not tied to the Node(s) contained in this Response. + Index uint64 `json:"-"` + + // ClusterID holds the cluster-level ID reported by the server. This + // should be different for different etcd clusters. + ClusterID string `json:"-"` +} + +type Node struct { + // Key represents the unique location of this Node (e.g. "/foo/bar"). + Key string `json:"key"` + + // Dir reports whether node describes a directory. + Dir bool `json:"dir,omitempty"` + + // Value is the current data stored on this Node. If this Node + // is a directory, Value will be empty. + Value string `json:"value"` + + // Nodes holds the children of this Node, only if this Node is a directory. + // This slice of will be arbitrarily deep (children, grandchildren, great- + // grandchildren, etc.) if a recursive Get or Watch request were made. + Nodes Nodes `json:"nodes"` + + // CreatedIndex is the etcd index at-which this Node was created. + CreatedIndex uint64 `json:"createdIndex"` + + // ModifiedIndex is the etcd index at-which this Node was last modified. + ModifiedIndex uint64 `json:"modifiedIndex"` + + // Expiration is the server side expiration time of the key. + Expiration *time.Time `json:"expiration,omitempty"` + + // TTL is the time to live of the key in second. 
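+ // TTLDuration, defined below, exposes the same value as a time.Duration.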
+ TTL int64 `json:"ttl,omitempty"` +} + +func (n *Node) String() string { + return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) +} + +// TTLDuration returns the Node's TTL as a time.Duration object +func (n *Node) TTLDuration() time.Duration { + return time.Duration(n.TTL) * time.Second +} + +type Nodes []*Node + +// interfaces for sorting + +func (ns Nodes) Len() int { return len(ns) } +func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } +func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } + +type httpKeysAPI struct { + client httpClient + prefix string +} + +func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { + act := &setAction{ + Prefix: k.prefix, + Key: key, + Value: val, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.PrevExist = opts.PrevExist + act.TTL = opts.TTL + act.Refresh = opts.Refresh + act.Dir = opts.Dir + act.NoValueOnSuccess = opts.NoValueOnSuccess + } + + doCtx := ctx + if act.PrevExist == PrevNoExist { + doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue) + } + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) +} + +func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { + act := &createInOrderAction{ + Prefix: k.prefix, + Dir: dir, + Value: val, + } + + if opts != nil { + act.TTL = opts.TTL + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) +} + +func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { + act := &deleteAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.Dir = opts.Dir + act.Recursive = opts.Recursive + } + + doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue) + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { + act := &getAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + act.Sorted = opts.Sort + act.Quorum = opts.Quorum + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { + act := waitAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + if opts.AfterIndex > 0 { + act.WaitIndex = opts.AfterIndex + 1 + } + } + + return &httpWatcher{ + client: k.client, + nextWait: act, + } +} + +type httpWatcher struct { + client httpClient + nextWait waitAction +} + +func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { + for { + 
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) + if err != nil { + return nil, err + } + + resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) + if err != nil { + if err == ErrEmptyBody { + continue + } + return nil, err + } + + hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 + return resp, nil + } +} + +// v2KeysURL forms a URL representing the location of a key. +// The endpoint argument represents the base URL of an etcd +// server. The prefix is the path needed to route from the +// provided endpoint's path to the root of the keys API +// (typically "/v2/keys"). +func v2KeysURL(ep url.URL, prefix, key string) *url.URL { + // We concatenate all parts together manually. We cannot use + // path.Join because it does not reserve trailing slash. + // We call CanonicalURLPath to further cleanup the path. + if prefix != "" && prefix[0] != '/' { + prefix = "/" + prefix + } + if key != "" && key[0] != '/' { + key = "/" + key + } + ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) + return &ep +} + +type getAction struct { + Prefix string + Key string + Recursive bool + Sorted bool + Quorum bool +} + +func (g *getAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, g.Prefix, g.Key) + + params := u.Query() + params.Set("recursive", strconv.FormatBool(g.Recursive)) + params.Set("sorted", strconv.FormatBool(g.Sorted)) + params.Set("quorum", strconv.FormatBool(g.Quorum)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type waitAction struct { + Prefix string + Key string + WaitIndex uint64 + Recursive bool +} + +func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, w.Prefix, w.Key) + + params := u.Query() + params.Set("wait", "true") + params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) + params.Set("recursive", strconv.FormatBool(w.Recursive)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type setAction struct { + Prefix string + Key string + Value string + PrevValue string + PrevIndex uint64 + PrevExist PrevExistType + TTL time.Duration + Refresh bool + Dir bool + NoValueOnSuccess bool +} + +func (a *setAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + form := url.Values{} + + // we're either creating a directory or setting a key + if a.Dir { + params.Set("dir", strconv.FormatBool(a.Dir)) + } else { + // These options are only valid for setting a key + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + form.Add("value", a.Value) + } + + // Options which apply to both setting a key and creating a dir + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.PrevExist != PrevIgnore { + params.Set("prevExist", string(a.PrevExist)) + } + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + + if a.Refresh { + form.Add("refresh", "true") + } + if a.NoValueOnSuccess { + params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess)) + } + + u.RawQuery = params.Encode() + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("PUT", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type deleteAction struct { + Prefix string + Key string + PrevValue string + PrevIndex uint64 + Dir bool + Recursive bool +} + +func (a *deleteAction) HTTPRequest(ep url.URL) 
*http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.Dir { + params.Set("dir", "true") + } + if a.Recursive { + params.Set("recursive", "true") + } + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("DELETE", u.String(), nil) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type createInOrderAction struct { + Prefix string + Dir string + Value string + TTL time.Duration +} + +func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Dir) + + form := url.Values{} + form.Add("value", a.Value) + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("POST", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + return req +} + +func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { + switch code { + case http.StatusOK, http.StatusCreated: + if len(body) == 0 { + return nil, ErrEmptyBody + } + res, err = unmarshalSuccessfulKeysResponse(header, body) + default: + err = unmarshalFailedKeysResponse(body) + } + + return +} + +func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { + var res Response + err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) + if err != nil { + return nil, ErrInvalidJSON + } + if header.Get("X-Etcd-Index") != "" { + res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) + if err != nil { + return nil, err + } + } + res.ClusterID = header.Get("X-Etcd-Cluster-ID") + return &res, nil +} + +func unmarshalFailedKeysResponse(body []byte) error { + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return ErrInvalidJSON + } + return etcdErr +} diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go new file mode 100644 index 000000000000..23adf07ad91b --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/members.go @@ -0,0 +1,304 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + + "golang.org/x/net/context" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + defaultV2MembersPrefix = "/v2/members" + defaultLeaderSuffix = "/leader" +) + +type Member struct { + // ID is the unique identifier of this Member. + ID string `json:"id"` + + // Name is a human-readable, non-unique identifier of this Member. + Name string `json:"name"` + + // PeerURLs represents the HTTP(S) endpoints this Member uses to + // participate in etcd's consensus protocol. 
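+ // Peer URLs carry cluster-internal raft traffic; they are distinct
+ // from the client-facing endpoints listed in ClientURLs below.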
+ PeerURLs []string `json:"peerURLs"` + + // ClientURLs represents the HTTP(S) endpoints on which this Member + // serves it's client-facing APIs. + ClientURLs []string `json:"clientURLs"` +} + +type memberCollection []Member + +func (c *memberCollection) UnmarshalJSON(data []byte) error { + d := struct { + Members []Member + }{} + + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + if d.Members == nil { + *c = make([]Member, 0) + return nil + } + + *c = d.Members + return nil +} + +type memberCreateOrUpdateRequest struct { + PeerURLs types.URLs +} + +func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { + s := struct { + PeerURLs []string `json:"peerURLs"` + }{ + PeerURLs: make([]string, len(m.PeerURLs)), + } + + for i, u := range m.PeerURLs { + s.PeerURLs[i] = u.String() + } + + return json.Marshal(&s) +} + +// NewMembersAPI constructs a new MembersAPI that uses HTTP to +// interact with etcd's membership API. +func NewMembersAPI(c Client) MembersAPI { + return &httpMembersAPI{ + client: c, + } +} + +type MembersAPI interface { + // List enumerates the current cluster membership. + List(ctx context.Context) ([]Member, error) + + // Add instructs etcd to accept a new Member into the cluster. + Add(ctx context.Context, peerURL string) (*Member, error) + + // Remove demotes an existing Member out of the cluster. + Remove(ctx context.Context, mID string) error + + // Update instructs etcd to update an existing Member in the cluster. + Update(ctx context.Context, mID string, peerURLs []string) error + + // Leader gets current leader of the cluster + Leader(ctx context.Context) (*Member, error) +} + +type httpMembersAPI struct { + client httpClient +} + +func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { + req := &membersAPIActionList{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var mCollection memberCollection + if err := json.Unmarshal(body, &mCollection); err != nil { + return nil, err + } + + return []Member(mCollection), nil +} + +func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { + urls, err := types.NewURLs([]string{peerURL}) + if err != nil { + return nil, err + } + + req := &membersAPIActionAdd{peerURLs: urls} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusCreated { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return nil, err + } + return nil, merr + } + + var memb Member + if err := json.Unmarshal(body, &memb); err != nil { + return nil, err + } + + return &memb, nil +} + +func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { + urls, err := types.NewURLs(peerURLs) + if err != nil { + return err + } + + req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return err + } + return merr + } + + return nil +} + +func (m 
*httpMembersAPI) Remove(ctx context.Context, memberID string) error { + req := &membersAPIActionRemove{memberID: memberID} + resp, _, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) +} + +func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { + req := &membersAPIActionLeader{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var leader Member + if err := json.Unmarshal(body, &leader); err != nil { + return nil, err + } + + return &leader, nil +} + +type membersAPIActionList struct{} + +func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type membersAPIActionRemove struct { + memberID string +} + +func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, d.memberID) + req, _ := http.NewRequest("DELETE", u.String(), nil) + return req +} + +type membersAPIActionAdd struct { + peerURLs types.URLs +} + +func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +type membersAPIActionUpdate struct { + memberID string + peerURLs types.URLs +} + +func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + u.Path = path.Join(u.Path, a.memberID) + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +func assertStatusCode(got int, want ...int) (err error) { + for _, w := range want { + if w == got { + return nil + } + } + return fmt.Errorf("unexpected status code %d", got) +} + +type membersAPIActionLeader struct{} + +func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, defaultLeaderSuffix) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +// v2MembersURL adds the necessary path to the provided endpoint +// to route requests to the default v2 members API. +func v2MembersURL(ep url.URL) *url.URL { + ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) + return &ep +} + +type membersError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e membersError) Error() string { + return e.Message +} diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go new file mode 100644 index 000000000000..15a8babff4d4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/util.go @@ -0,0 +1,53 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "regexp" +) + +var ( + roleNotFoundRegExp *regexp.Regexp + userNotFoundRegExp *regexp.Regexp +) + +func init() { + roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") + userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") +} + +// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. +func IsKeyNotFound(err error) bool { + if cErr, ok := err.(Error); ok { + return cErr.Code == ErrorCodeKeyNotFound + } + return false +} + +// IsRoleNotFound returns true if the error means role not found of v2 API. +func IsRoleNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return roleNotFoundRegExp.MatchString(ae.Message) + } + return false +} + +// IsUserNotFound returns true if the error means user not found of v2 API. +func IsUserNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return userNotFoundRegExp.MatchString(ae.Message) + } + return false +} diff --git a/vendor/github.com/coreos/etcd/clientv3/BUILD b/vendor/github.com/coreos/etcd/clientv3/BUILD new file mode 100644 index 000000000000..a434ea43e826 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/BUILD @@ -0,0 +1,58 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "auth.go", + "client.go", + "cluster.go", + "compact_op.go", + "compare.go", + "config.go", + "doc.go", + "grpc_options.go", + "health_balancer.go", + "kv.go", + "lease.go", + "logger.go", + "maintenance.go", + "op.go", + "ready_wait.go", + "retry.go", + "sort.go", + "txn.go", + "watch.go", + ], + importpath = "github.com/coreos/etcd/clientv3", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/health/grpc_health_v1:go_default_library", + "//vendor/google.golang.org/grpc/keepalive:go_default_library", + "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md 
b/vendor/github.com/coreos/etcd/clientv3/README.md new file mode 100644 index 000000000000..376bfba7614d --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/README.md @@ -0,0 +1,85 @@ +# etcd/clientv3 + +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) + +`etcd/clientv3` is the official Go etcd client for v3. + +## Install + +```bash +go get github.com/coreos/etcd/clientv3 +``` + +## Get started + +Create a client using `clientv3.New`: + +```go +cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, + DialTimeout: 5 * time.Second, +}) +if err != nil { + // handle error! +} +defer cli.Close() +``` + +etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls, and `clientv3` uses +[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it. +If the client is not closed, the connection will leak goroutines. To specify a client request timeout, +pass a `context.WithTimeout` context to APIs: + +```go +ctx, cancel := context.WithTimeout(context.Background(), timeout) +resp, err := cli.Put(ctx, "sample_key", "sample_value") +cancel() +if err != nil { + // handle error! +} +// use the response +``` + +etcd uses the `cmd/vendor` directory to store external dependencies, which are +to be compiled into etcd release binaries. `client` can be imported without +vendoring. For full compatibility, it is recommended to vendor builds using +etcd's vendored packages, with tools like godep, as in +[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). +For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). + +## Error Handling + +The etcd client returns two types of errors: + +1. context error: canceled or deadline exceeded. +2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes). + +Here is example code for handling client errors: + +```go +resp, err := cli.Put(ctx, "", "") +if err != nil { + switch err { + case context.Canceled: + log.Fatalf("ctx is canceled by another routine: %v", err) + case context.DeadlineExceeded: + log.Fatalf("ctx is attached with a deadline and it exceeded: %v", err) + case rpctypes.ErrEmptyKey: + log.Fatalf("client-side error: %v", err) + default: + log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) + } +} +``` + +## Metrics + +The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go). + +## Namespacing + +The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. + +## Examples + +More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
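+ +As a minimal sketch (assuming a client `cli` created as above; an illustrative example rather than upstream reference code), the following loop watches a key and prints the events it receives: + +```go +rch := cli.Watch(context.Background(), "foo") +for wresp := range rch { + for _, ev := range wresp.Events { + fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value) + } +} +```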
diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go new file mode 100644 index 000000000000..a64b8caca895 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/auth.go @@ -0,0 +1,233 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "fmt" + "strings" + + "github.com/coreos/etcd/auth/authpb" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type ( + AuthEnableResponse pb.AuthEnableResponse + AuthDisableResponse pb.AuthDisableResponse + AuthenticateResponse pb.AuthenticateResponse + AuthUserAddResponse pb.AuthUserAddResponse + AuthUserDeleteResponse pb.AuthUserDeleteResponse + AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse + AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse + AuthUserGetResponse pb.AuthUserGetResponse + AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse + AuthRoleAddResponse pb.AuthRoleAddResponse + AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse + AuthRoleGetResponse pb.AuthRoleGetResponse + AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse + AuthRoleDeleteResponse pb.AuthRoleDeleteResponse + AuthUserListResponse pb.AuthUserListResponse + AuthRoleListResponse pb.AuthRoleListResponse + + PermissionType authpb.Permission_Type + Permission authpb.Permission +) + +const ( + PermRead = authpb.READ + PermWrite = authpb.WRITE + PermReadWrite = authpb.READWRITE +) + +type Auth interface { + // AuthEnable enables auth of an etcd cluster. + AuthEnable(ctx context.Context) (*AuthEnableResponse, error) + + // AuthDisable disables auth of an etcd cluster. + AuthDisable(ctx context.Context) (*AuthDisableResponse, error) + + // UserAdd adds a new user to an etcd cluster. + UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) + + // UserDelete deletes a user from an etcd cluster. + UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) + + // UserChangePassword changes a password of a user. + UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) + + // UserGrantRole grants a role to a user. + UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) + + // UserGet gets detailed information about a user. + UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) + + // UserList gets a list of all users. + UserList(ctx context.Context) (*AuthUserListResponse, error) + + // UserRevokeRole revokes a role from a user. + UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) + + // RoleAdd adds a new role to an etcd cluster. + RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) + + // RoleGrantPermission grants a permission to a role.
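+	// For example (an illustrative sketch; the names are placeholders): + //	RoleGrantPermission(ctx, "role1", "foo", "zoo", clientv3.PermReadWrite) + // grants role1 read-write access to keys in the range ["foo", "zoo").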
+	RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) + + // RoleGet gets detailed information about a role. + RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) + + // RoleList gets a list of all roles. + RoleList(ctx context.Context) (*AuthRoleListResponse, error) + + // RoleRevokePermission revokes a permission from a role. + RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) + + // RoleDelete deletes a role. + RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) +} + +type auth struct { + remote pb.AuthClient + callOpts []grpc.CallOption +} + +func NewAuth(c *Client) Auth { + api := &auth{remote: RetryAuthClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) + return (*AuthEnableResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) + return (*AuthDisableResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { + resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) + return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { + resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { + resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) + return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { + resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) + return (*AuthUserGetResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) + return (*AuthUserListResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { + resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) + return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { + resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
+ return (*AuthRoleAddResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { + perm := &authpb.Permission{ + Key: []byte(key), + RangeEnd: []byte(rangeEnd), + PermType: authpb.Permission_Type(permType), + } + resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) + return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) + return (*AuthRoleGetResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) + return (*AuthRoleListResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...) + return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { + resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) + return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) +} + +func StrToPermissionType(s string) (PermissionType, error) { + val, ok := authpb.Permission_Type_value[strings.ToUpper(s)] + if ok { + return PermissionType(val), nil + } + return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s) +} + +type authenticator struct { + conn *grpc.ClientConn // conn in-use + remote pb.AuthClient + callOpts []grpc.CallOption +} + +func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { + resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthenticateResponse)(resp), toErr(ctx, err) +} + +func (auth *authenticator) close() { + auth.conn.Close() +} + +func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return nil, err + } + + api := &authenticator{ + conn: conn, + remote: pb.NewAuthClient(conn), + } + if c != nil { + api.callOpts = c.callOpts + } + return api, nil +} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go new file mode 100644 index 000000000000..2bdd928771f1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -0,0 +1,562 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +var ( + ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") + ErrOldCluster = errors.New("etcdclient: old cluster version") +) + +// Client provides and manages an etcd v3 client session. +type Client struct { + Cluster + KV + Lease + Watcher + Auth + Maintenance + + conn *grpc.ClientConn + dialerrc chan error + + cfg Config + creds *credentials.TransportCredentials + balancer *healthBalancer + mu *sync.Mutex + + ctx context.Context + cancel context.CancelFunc + + // Username is a user name for authentication. + Username string + // Password is a password for authentication. + Password string + // tokenCred is an instance of WithPerRPCCredentials()'s argument + tokenCred *authTokenCredential + + callOpts []grpc.CallOption +} + +// New creates a new etcdv3 client from a given configuration. +func New(cfg Config) (*Client, error) { + if len(cfg.Endpoints) == 0 { + return nil, ErrNoAvailableEndpoints + } + + return newClient(&cfg) +} + +// NewCtxClient creates a client with a context but no underlying grpc +// connection. This is useful for embedded cases that override the +// service interface implementations and do not need connection management. +func NewCtxClient(ctx context.Context) *Client { + cctx, cancel := context.WithCancel(ctx) + return &Client{ctx: cctx, cancel: cancel} +} + +// NewFromURL creates a new etcdv3 client from a URL. +func NewFromURL(url string) (*Client, error) { + return New(Config{Endpoints: []string{url}}) +} + +// Close shuts down the client's etcd connections. +func (c *Client) Close() error { + c.cancel() + c.Watcher.Close() + c.Lease.Close() + if c.conn != nil { + return toErr(c.ctx, c.conn.Close()) + } + return c.ctx.Err() +} + +// Ctx is a context for "out of band" messages (e.g., for sending +// a "clean up" message when another context is canceled). It is +// canceled on client Close(). +func (c *Client) Ctx() context.Context { return c.ctx } + +// Endpoints lists the registered endpoints for the client. +func (c *Client) Endpoints() (eps []string) { + // copy the slice; protect original endpoints from being changed + eps = make([]string, len(c.cfg.Endpoints)) + copy(eps, c.cfg.Endpoints) + return +} + +// SetEndpoints updates the client's endpoints. +func (c *Client) SetEndpoints(eps ...string) { + c.mu.Lock() + c.cfg.Endpoints = eps + c.mu.Unlock() + c.balancer.updateAddrs(eps...) + + // Updating notifyCh can trigger new connections; we need to + // update addrs if all connections are down or addrs does not + // include pinAddr. + c.balancer.mu.RLock() + update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr) + c.balancer.mu.RUnlock() + if update { + select { + case c.balancer.updateAddrsC <- notifyNext: + case <-c.balancer.stopc: + } + } +} + +// Sync synchronizes the client's endpoints with the known endpoints from the etcd membership.
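+// A minimal usage sketch (assuming an existing client c; illustrative only): +// +//	if err := c.Sync(context.TODO()); err != nil { +//		// handle error +//	}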
+func (c *Client) Sync(ctx context.Context) error { + mresp, err := c.MemberList(ctx) + if err != nil { + return err + } + var eps []string + for _, m := range mresp.Members { + eps = append(eps, m.ClientURLs...) + } + c.SetEndpoints(eps...) + return nil +} + +func (c *Client) autoSync() { + if c.cfg.AutoSyncInterval == time.Duration(0) { + return + } + + for { + select { + case <-c.ctx.Done(): + return + case <-time.After(c.cfg.AutoSyncInterval): + ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) + err := c.Sync(ctx) + cancel() + if err != nil && err != c.ctx.Err() { + logger.Println("Auto sync endpoints failed:", err) + } + } + } +} + +type authTokenCredential struct { + token string + tokenMu *sync.RWMutex +} + +func (cred authTokenCredential) RequireTransportSecurity() bool { + return false +} + +func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { + cred.tokenMu.RLock() + defer cred.tokenMu.RUnlock() + return map[string]string{ + "token": cred.token, + }, nil +} + +func parseEndpoint(endpoint string) (proto string, host string, scheme string) { + proto = "tcp" + host = endpoint + url, uerr := url.Parse(endpoint) + if uerr != nil || !strings.Contains(endpoint, "://") { + return proto, host, scheme + } + scheme = url.Scheme + + // strip scheme:// prefix since grpc dials by host + host = url.Host + switch url.Scheme { + case "http", "https": + case "unix", "unixs": + proto = "unix" + host = url.Host + url.Path + default: + proto, host = "", "" + } + return proto, host, scheme +} + +func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { + creds = c.creds + switch scheme { + case "unix": + case "http": + creds = nil + case "https", "unixs": + if creds != nil { + break + } + tlsconfig := &tls.Config{} + emptyCreds := credentials.NewTLS(tlsconfig) + creds = &emptyCreds + default: + creds = nil + } + return creds +} + +// dialSetupOpts gives the dial opts prior to any authentication +func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) { + if c.cfg.DialTimeout > 0 { + opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} + } + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + Timeout: c.cfg.DialKeepAliveTimeout, + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } + opts = append(opts, dopts...) + + f := func(host string, t time.Duration) (net.Conn, error) { + proto, host, _ := parseEndpoint(c.balancer.endpoint(host)) + if host == "" && endpoint != "" { + // dialing an endpoint not in the balancer; use + // endpoint passed into dial + proto, host, _ = parseEndpoint(endpoint) + } + if proto == "" { + return nil, fmt.Errorf("unknown scheme for %q", host) + } + select { + case <-c.ctx.Done(): + return nil, c.ctx.Err() + default: + } + dialer := &net.Dialer{Timeout: t} + conn, err := dialer.DialContext(c.ctx, proto, host) + if err != nil { + select { + case c.dialerrc <- err: + default: + } + } + return conn, err + } + opts = append(opts, grpc.WithDialer(f)) + + creds := c.creds + if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 { + creds = c.processCreds(scheme) + } + if creds != nil { + opts = append(opts, grpc.WithTransportCredentials(*creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + + return opts +} + +// Dial connects to a single endpoint using the client's config. 
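+// For example (an illustrative sketch): conn, err := c.Dial("localhost:2379").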
+func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) { + return c.dial(endpoint) +} + +func (c *Client) getToken(ctx context.Context) error { + var err error // return the last error in case of failure + var auth *authenticator + + for i := 0; i < len(c.cfg.Endpoints); i++ { + endpoint := c.cfg.Endpoints[i] + host := getHost(endpoint) + // use dial options without dopts to avoid reusing the client balancer + auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c) + if err != nil { + continue + } + defer auth.close() + + var resp *AuthenticateResponse + resp, err = auth.authenticate(ctx, c.Username, c.Password) + if err != nil { + continue + } + + c.tokenCred.tokenMu.Lock() + c.tokenCred.token = resp.Token + c.tokenCred.tokenMu.Unlock() + + return nil + } + + return err +} + +func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { + opts := c.dialSetupOpts(endpoint, dopts...) + host := getHost(endpoint) + if c.Username != "" && c.Password != "" { + c.tokenCred = &authTokenCredential{ + tokenMu: &sync.RWMutex{}, + } + + ctx := c.ctx + if c.cfg.DialTimeout > 0 { + cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout) + defer cancel() + ctx = cctx + } + + err := c.getToken(ctx) + if err != nil { + if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { + if err == ctx.Err() && ctx.Err() != c.ctx.Err() { + err = context.DeadlineExceeded + } + return nil, err + } + } else { + opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) + } + } + + opts = append(opts, c.cfg.DialOptions...) + + conn, err := grpc.DialContext(c.ctx, host, opts...) + if err != nil { + return nil, err + } + return conn, nil +} + +// WithRequireLeader requires client requests to only succeed +// when the cluster has a leader.
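+// A minimal usage sketch (assuming an existing client cli; illustrative only): +// +//	ctx := clientv3.WithRequireLeader(context.Background()) +//	resp, err := cli.Get(ctx, "foo")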
+func WithRequireLeader(ctx context.Context) context.Context { + md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) + return metadata.NewOutgoingContext(ctx, md) +} + +func newClient(cfg *Config) (*Client, error) { + if cfg == nil { + cfg = &Config{} + } + var creds *credentials.TransportCredentials + if cfg.TLS != nil { + c := credentials.NewTLS(cfg.TLS) + creds = &c + } + + // use a temporary skeleton client to bootstrap the first connection + baseCtx := context.TODO() + if cfg.Context != nil { + baseCtx = cfg.Context + } + + ctx, cancel := context.WithCancel(baseCtx) + client := &Client{ + conn: nil, + dialerrc: make(chan error, 1), + cfg: *cfg, + creds: creds, + ctx: ctx, + cancel: cancel, + mu: new(sync.Mutex), + callOpts: defaultCallOpts, + } + if cfg.Username != "" && cfg.Password != "" { + client.Username = cfg.Username + client.Password = cfg.Password + } + if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { + if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { + return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize) + } + callOpts := []grpc.CallOption{ + defaultFailFast, + defaultMaxCallSendMsgSize, + defaultMaxCallRecvMsgSize, + } + if cfg.MaxCallSendMsgSize > 0 { + callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize) + } + if cfg.MaxCallRecvMsgSize > 0 { + callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize) + } + client.callOpts = callOpts + } + + client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) { + return grpcHealthCheck(client, ep) + }) + + // use Endpoints[0] so that, for https:// endpoints without any TLS config given, + // grpc will assume the certificate server name is the endpoint host.
+ conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) + if err != nil { + client.cancel() + client.balancer.Close() + return nil, err + } + client.conn = conn + + // wait for a connection + if cfg.DialTimeout > 0 { + hasConn := false + waitc := time.After(cfg.DialTimeout) + select { + case <-client.balancer.ready(): + hasConn = true + case <-ctx.Done(): + case <-waitc: + } + if !hasConn { + err := context.DeadlineExceeded + select { + case err = <-client.dialerrc: + default: + } + client.cancel() + client.balancer.Close() + conn.Close() + return nil, err + } + } + + client.Cluster = NewCluster(client) + client.KV = NewKV(client) + client.Lease = NewLease(client) + client.Watcher = NewWatcher(client) + client.Auth = NewAuth(client) + client.Maintenance = NewMaintenance(client) + + if cfg.RejectOldCluster { + if err := client.checkVersion(); err != nil { + client.Close() + return nil, err + } + } + + go client.autoSync() + return client, nil +} + +func (c *Client) checkVersion() (err error) { + var wg sync.WaitGroup + errc := make(chan error, len(c.cfg.Endpoints)) + ctx, cancel := context.WithCancel(c.ctx) + if c.cfg.DialTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) + } + wg.Add(len(c.cfg.Endpoints)) + for _, ep := range c.cfg.Endpoints { + // if cluster is current, any endpoint gives a recent version + go func(e string) { + defer wg.Done() + resp, rerr := c.Status(ctx, e) + if rerr != nil { + errc <- rerr + return + } + vs := strings.Split(resp.Version, ".") + maj, min := 0, 0 + if len(vs) >= 2 { + maj, _ = strconv.Atoi(vs[0]) + min, rerr = strconv.Atoi(vs[1]) + } + if maj < 3 || (maj == 3 && min < 2) { + rerr = ErrOldCluster + } + errc <- rerr + }(ep) + } + // wait for success + for i := 0; i < len(c.cfg.Endpoints); i++ { + if err = <-errc; err == nil { + break + } + } + cancel() + wg.Wait() + return err +} + +// ActiveConnection returns the current in-use connection +func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } + +// isHaltErr returns true if the given error and context indicate no forward +// progress can be made, even after reconnecting. +func isHaltErr(ctx context.Context, err error) bool { + if ctx != nil && ctx.Err() != nil { + return true + } + if err == nil { + return false + } + ev, _ := status.FromError(err) + // Unavailable codes mean the system will be right back. + // (e.g., can't connect, lost leader) + // Treat Internal codes as if something failed, leaving the + // system in an inconsistent state, but retrying could make progress. + // (e.g., failed in middle of send, corrupted frame) + // TODO: are permanent Internal errors possible from grpc? 
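+	// Any other code (e.g., codes.InvalidArgument or codes.PermissionDenied) + // is treated as a halt error, so callers stop retrying.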
+ return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal +} + +func toErr(ctx context.Context, err error) error { + if err == nil { + return nil + } + err = rpctypes.Error(err) + if _, ok := err.(rpctypes.EtcdError); ok { + return err + } + ev, _ := status.FromError(err) + code := ev.Code() + switch code { + case codes.DeadlineExceeded: + fallthrough + case codes.Canceled: + if ctx.Err() != nil { + err = ctx.Err() + } + case codes.Unavailable: + case codes.FailedPrecondition: + err = grpc.ErrClientConnClosing + } + return err +} + +func canceledByCaller(stopCtx context.Context, err error) bool { + if stopCtx.Err() == nil || err == nil { + return false + } + + return err == context.Canceled || err == context.DeadlineExceeded +} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go new file mode 100644 index 000000000000..93637ddc570c --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/cluster.go @@ -0,0 +1,113 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/types" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type ( + Member pb.Member + MemberListResponse pb.MemberListResponse + MemberAddResponse pb.MemberAddResponse + MemberRemoveResponse pb.MemberRemoveResponse + MemberUpdateResponse pb.MemberUpdateResponse +) + +type Cluster interface { + // MemberList lists the current cluster membership. + MemberList(ctx context.Context) (*MemberListResponse, error) + + // MemberAdd adds a new member into the cluster. + MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + + // MemberRemove removes an existing member from the cluster. + MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) + + // MemberUpdate updates the peer addresses of the member. + MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) +} + +type cluster struct { + remote pb.ClusterClient + callOpts []grpc.CallOption +} + +func NewCluster(c *Client) Cluster { + api := &cluster{remote: RetryClusterClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { + api := &cluster{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + r := &pb.MemberAddRequest{PeerURLs: peerAddrs} + resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) 
+ if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberAddResponse)(resp), nil +} + +func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { + r := &pb.MemberRemoveRequest{ID: id} + resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberRemoveResponse)(resp), nil +} + +func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + // it is safe to retry on update. + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) + if err == nil { + return (*MemberUpdateResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { + // it is safe to retry on list. + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...) + if err == nil { + return (*MemberListResponse)(resp), nil + } + return nil, toErr(ctx, err) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go new file mode 100644 index 000000000000..41e80c1da5d4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/compact_op.go @@ -0,0 +1,51 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +) + +// CompactOp represents a compact operation. +type CompactOp struct { + revision int64 + physical bool +} + +// CompactOption configures compact operation. +type CompactOption func(*CompactOp) + +func (op *CompactOp) applyCompactOpts(opts []CompactOption) { + for _, opt := range opts { + opt(op) + } +} + +// OpCompact wraps slice CompactOption to create a CompactOp. +func OpCompact(rev int64, opts ...CompactOption) CompactOp { + ret := CompactOp{revision: rev} + ret.applyCompactOpts(opts) + return ret +} + +func (op CompactOp) toRequest() *pb.CompactionRequest { + return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} +} + +// WithCompactPhysical makes Compact wait until all compacted entries are +// removed from the etcd server's storage. +func WithCompactPhysical() CompactOption { + return func(op *CompactOp) { op.physical = true } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go new file mode 100644 index 000000000000..68a25fd800fb --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/compare.go @@ -0,0 +1,120 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +) + +type CompareTarget int +type CompareResult int + +const ( + CompareVersion CompareTarget = iota + CompareCreated + CompareModified + CompareValue +) + +type Cmp pb.Compare + +func Compare(cmp Cmp, result string, v interface{}) Cmp { + var r pb.Compare_CompareResult + + switch result { + case "=": + r = pb.Compare_EQUAL + case "!=": + r = pb.Compare_NOT_EQUAL + case ">": + r = pb.Compare_GREATER + case "<": + r = pb.Compare_LESS + default: + panic("Unknown result op") + } + + cmp.Result = r + switch cmp.Target { + case pb.Compare_VALUE: + val, ok := v.(string) + if !ok { + panic("bad compare value") + } + cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)} + case pb.Compare_VERSION: + cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)} + case pb.Compare_CREATE: + cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} + case pb.Compare_MOD: + cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)} + default: + panic("Unknown compare type") + } + return cmp +} + +func Value(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_VALUE} +} + +func Version(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_VERSION} +} + +func CreateRevision(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_CREATE} +} + +func ModRevision(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_MOD} +} + +// KeyBytes returns the byte slice holding the comparison key. +func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } + +// WithKeyBytes sets the byte slice for the comparison key. +func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } + +// ValueBytes returns the byte slice holding the comparison value, if any. +func (cmp *Cmp) ValueBytes() []byte { + if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { + return tu.Value + } + return nil +} + +// WithValueBytes sets the byte slice for the comparison's value. +func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } + +// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. +func mustInt64(val interface{}) int64 { + if v, ok := val.(int64); ok { + return v + } + if v, ok := val.(int); ok { + return int64(v) + } + panic("bad value") +} + +// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an +// int64 otherwise. +func mustInt64orLeaseID(val interface{}) int64 { + if v, ok := val.(LeaseID); ok { + return int64(v) + } + return mustInt64(val) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go new file mode 100644 index 000000000000..fee12eaf60bb --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/config.go @@ -0,0 +1,75 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "crypto/tls" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type Config struct { + // Endpoints is a list of URLs. + Endpoints []string `json:"endpoints"` + + // AutoSyncInterval is the interval to update endpoints with its latest members. + // 0 disables auto-sync. By default auto-sync is disabled. + AutoSyncInterval time.Duration `json:"auto-sync-interval"` + + // DialTimeout is the timeout for failing to establish a connection. + DialTimeout time.Duration `json:"dial-timeout"` + + // DialKeepAliveTime is the time in seconds after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time in seconds that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + + // MaxCallSendMsgSize is the client-side request send limit in bytes. + // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024). + // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallSendMsgSize int + + // MaxCallRecvMsgSize is the client-side response receive limit. + // If 0, it defaults to "math.MaxInt32", because range response can + // easily exceed request send limits. + // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallRecvMsgSize int + + // TLS holds the client secure credentials, if any. + TLS *tls.Config + + // Username is a user name for authentication. + Username string `json:"username"` + + // Password is a password for authentication. + Password string `json:"password"` + + // RejectOldCluster when set will refuse to create a client against an outdated cluster. + RejectOldCluster bool `json:"reject-old-cluster"` + + // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). + DialOptions []grpc.DialOption + + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. + Context context.Context +} diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go new file mode 100644 index 000000000000..dacc5bb346fc --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/doc.go @@ -0,0 +1,64 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package clientv3 implements the official Go etcd client for v3. +// +// Create client using `clientv3.New`: +// +// cli, err := clientv3.New(clientv3.Config{ +// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, +// DialTimeout: 5 * time.Second, +// }) +// if err != nil { +// // handle error! +// } +// defer cli.Close() +// +// Make sure to close the client after using it. If the client is not closed, the +// connection will have leaky goroutines. +// +// To specify a client request timeout, wrap the context with context.WithTimeout: +// +// ctx, cancel := context.WithTimeout(context.Background(), timeout) +// resp, err := kvc.Put(ctx, "sample_key", "sample_value") +// cancel() +// if err != nil { +// // handle error! +// } +// // use the response +// +// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. +// Clients are safe for concurrent use by multiple goroutines. +// +// etcd client returns 2 types of errors: +// +// 1. context error: canceled or deadline exceeded. +// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go +// +// Here is the example code to handle client errors: +// +// resp, err := kvc.Put(ctx, "", "") +// if err != nil { +// if err == context.Canceled { +// // ctx is canceled by another routine +// } else if err == context.DeadlineExceeded { +// // ctx is attached with a deadline and it exceeded +// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok { +// // process (verr.Errors) +// } else { +// // bad cluster endpoints, which are not etcd servers +// } +// } +// +package clientv3 diff --git a/vendor/github.com/coreos/etcd/clientv3/grpc_options.go b/vendor/github.com/coreos/etcd/clientv3/grpc_options.go new file mode 100644 index 000000000000..592dd6993cf6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/grpc_options.go @@ -0,0 +1,46 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package clientv3 + +import ( + "math" + + "google.golang.org/grpc" +) + +var ( + // Disable gRPC's internal retry logic + // TODO: enable when gRPC retry is stable (FailFast=false) + // Reference: + // - https://github.com/grpc/grpc-go/issues/1532 + // - https://github.com/grpc/proposal/blob/master/A6-client-retries.md + defaultFailFast = grpc.FailFast(true) + + // client-side request send limit, gRPC default is math.MaxInt32 + // Make sure that "client-side send limit < server-side default send/recv limit" + // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes + defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024) + + // client-side response receive limit, gRPC default is 4MB + // Make sure that "client-side receive limit >= server-side default send/recv limit" + // because range response can easily exceed request send limits + // Default to math.MaxInt32; writes exceeding the server-side send limit fail anyway + defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32) +) + +// defaultCallOpts defines a list of default "gRPC.CallOption". +// Some options are exposed to "clientv3.Config". +// Defaults will be overridden by the settings in "clientv3.Config". +var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize} diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go new file mode 100644 index 000000000000..52bea90e66ed --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go @@ -0,0 +1,627 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "errors" + "net/url" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +const ( + minHealthRetryDuration = 3 * time.Second + unknownService = "unknown service grpc.health.v1.Health" +) + +// ErrNoAddrAvilable is returned by Get() when the balancer does not have +// any active connection to endpoints at the time. +// This error is returned only when opts.BlockingWait is true.
+var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available") + +type healthCheckFunc func(ep string) (bool, error) + +type notifyMsg int + +const ( + notifyReset notifyMsg = iota + notifyNext +) + +// healthBalancer does the bare minimum to expose multiple eps +// to the grpc reconnection code path +type healthBalancer struct { + // addrs are the client's endpoint addresses for grpc + addrs []grpc.Address + + // eps holds the raw endpoints from the client + eps []string + + // notifyCh notifies grpc of the set of addresses for connecting + notifyCh chan []grpc.Address + + // readyc closes once the first connection is up + readyc chan struct{} + readyOnce sync.Once + + // healthCheck checks an endpoint's health. + healthCheck healthCheckFunc + healthCheckTimeout time.Duration + + unhealthyMu sync.RWMutex + unhealthyHostPorts map[string]time.Time + + // mu protects all fields below. + mu sync.RWMutex + + // upc closes when pinAddr transitions from empty to non-empty or the balancer closes. + upc chan struct{} + + // downc closes when grpc calls down() on pinAddr + downc chan struct{} + + // stopc is closed to signal updateNotifyLoop should stop. + stopc chan struct{} + stopOnce sync.Once + wg sync.WaitGroup + + // donec closes when all goroutines are exited + donec chan struct{} + + // updateAddrsC notifies updateNotifyLoop to update addrs. + updateAddrsC chan notifyMsg + + // grpc issues TLS cert checks using the string passed into dial so + // that string must be the host. To recover the full scheme://host URL, + // have a map from hosts to the original endpoint. + hostPort2ep map[string]string + + // pinAddr is the currently pinned address; set to the empty string on + // initialization and shutdown. + pinAddr string + + closed bool +} + +func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer { + notifyCh := make(chan []grpc.Address) + addrs := eps2addrs(eps) + hb := &healthBalancer{ + addrs: addrs, + eps: eps, + notifyCh: notifyCh, + readyc: make(chan struct{}), + healthCheck: hc, + unhealthyHostPorts: make(map[string]time.Time), + upc: make(chan struct{}), + stopc: make(chan struct{}), + downc: make(chan struct{}), + donec: make(chan struct{}), + updateAddrsC: make(chan notifyMsg), + hostPort2ep: getHostPort2ep(eps), + } + if timeout < minHealthRetryDuration { + timeout = minHealthRetryDuration + } + hb.healthCheckTimeout = timeout + + close(hb.downc) + go hb.updateNotifyLoop() + hb.wg.Add(1) + go func() { + defer hb.wg.Done() + hb.updateUnhealthy() + }() + return hb +} + +func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } + +func (b *healthBalancer) ConnectNotify() <-chan struct{} { + b.mu.Lock() + defer b.mu.Unlock() + return b.upc +} + +func (b *healthBalancer) ready() <-chan struct{} { return b.readyc } + +func (b *healthBalancer) endpoint(hostPort string) string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.hostPort2ep[hostPort] +} + +func (b *healthBalancer) pinned() string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.pinAddr +} + +func (b *healthBalancer) hostPortError(hostPort string, err error) { + if b.endpoint(hostPort) == "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error()) + } + return + } + + b.unhealthyMu.Lock() + b.unhealthyHostPorts[hostPort] = time.Now() + b.unhealthyMu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, 
err.Error()) + } +} + +func (b *healthBalancer) removeUnhealthy(hostPort, msg string) { + if b.endpoint(hostPort) == "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg) + } + return + } + + b.unhealthyMu.Lock() + delete(b.unhealthyHostPorts, hostPort) + b.unhealthyMu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg) + } +} + +func (b *healthBalancer) countUnhealthy() (count int) { + b.unhealthyMu.RLock() + count = len(b.unhealthyHostPorts) + b.unhealthyMu.RUnlock() + return count +} + +func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) { + b.unhealthyMu.RLock() + _, unhealthy = b.unhealthyHostPorts[hostPort] + b.unhealthyMu.RUnlock() + return unhealthy +} + +func (b *healthBalancer) cleanupUnhealthy() { + b.unhealthyMu.Lock() + for k, v := range b.unhealthyHostPorts { + if time.Since(v) > b.healthCheckTimeout { + delete(b.unhealthyHostPorts, k) + if logger.V(4) { + logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout) + } + } + } + b.unhealthyMu.Unlock() +} + +func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) { + unhealthyCnt := b.countUnhealthy() + + b.mu.RLock() + defer b.mu.RUnlock() + + hbAddrs := b.addrs + if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) { + liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep)) + for k := range b.hostPort2ep { + liveHostPorts[k] = struct{}{} + } + return hbAddrs, liveHostPorts + } + + addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt) + liveHostPorts := make(map[string]struct{}, len(addrs)) + for _, addr := range b.addrs { + if !b.isUnhealthy(addr.Addr) { + addrs = append(addrs, addr) + liveHostPorts[addr.Addr] = struct{}{} + } + } + return addrs, liveHostPorts +} + +func (b *healthBalancer) updateUnhealthy() { + for { + select { + case <-time.After(b.healthCheckTimeout): + b.cleanupUnhealthy() + pinned := b.pinned() + if pinned == "" || b.isUnhealthy(pinned) { + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + return + } + } + case <-b.stopc: + return + } + } +} + +func (b *healthBalancer) updateAddrs(eps ...string) { + np := getHostPort2ep(eps) + + b.mu.Lock() + defer b.mu.Unlock() + + match := len(np) == len(b.hostPort2ep) + if match { + for k, v := range np { + if b.hostPort2ep[k] != v { + match = false + break + } + } + } + if match { + // same endpoints, so no need to update address + return + } + + b.hostPort2ep = np + b.addrs, b.eps = eps2addrs(eps), eps + + b.unhealthyMu.Lock() + b.unhealthyHostPorts = make(map[string]time.Time) + b.unhealthyMu.Unlock() +} + +func (b *healthBalancer) next() { + b.mu.RLock() + downc := b.downc + b.mu.RUnlock() + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + } + // wait until disconnect so new RPCs are not issued on old connection + select { + case <-downc: + case <-b.stopc: + } +} + +func (b *healthBalancer) updateNotifyLoop() { + defer close(b.donec) + + for { + b.mu.RLock() + upc, downc, addr := b.upc, b.downc, b.pinAddr + b.mu.RUnlock() + // downc or upc should be closed + select { + case <-downc: + downc = nil + default: + } + select { + case <-upc: + upc = nil + default: + } + switch { + case downc == nil && upc == nil: + // stale + select { + case <-b.stopc: + return + default: + } + case downc == nil: + b.notifyAddrs(notifyReset) + select { + case <-upc: + case msg := <-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + 
return + } + case upc == nil: + select { + // close connections that are not the pinned address + case b.notifyCh <- []grpc.Address{{Addr: addr}}: + case <-downc: + case <-b.stopc: + return + } + select { + case <-downc: + b.notifyAddrs(notifyReset) + case msg := <-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + return + } + } + } +} + +func (b *healthBalancer) notifyAddrs(msg notifyMsg) { + if msg == notifyNext { + select { + case b.notifyCh <- []grpc.Address{}: + case <-b.stopc: + return + } + } + b.mu.RLock() + pinAddr := b.pinAddr + downc := b.downc + b.mu.RUnlock() + addrs, hostPorts := b.liveAddrs() + + var waitDown bool + if pinAddr != "" { + _, ok := hostPorts[pinAddr] + waitDown = !ok + } + + select { + case b.notifyCh <- addrs: + if waitDown { + select { + case <-downc: + case <-b.stopc: + } + } + case <-b.stopc: + } +} + +func (b *healthBalancer) Up(addr grpc.Address) func(error) { + if !b.mayPin(addr) { + return func(err error) {} + } + + b.mu.Lock() + defer b.mu.Unlock() + + // gRPC might call Up after it called Close. We add this check + // to "fix" it up at application layer. Otherwise, will panic + // if b.upc is already closed. + if b.closed { + return func(err error) {} + } + + // gRPC might call Up on a stale address. + // Prevent updating pinAddr with a stale address. + if !hasAddr(b.addrs, addr.Addr) { + return func(err error) {} + } + + if b.pinAddr != "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr) + } + return func(err error) {} + } + + // notify waiting Get()s and pin first connected address + close(b.upc) + b.downc = make(chan struct{}) + b.pinAddr = addr.Addr + if logger.V(4) { + logger.Infof("clientv3/balancer: pin %q", addr.Addr) + } + + // notify client that a connection is up + b.readyOnce.Do(func() { close(b.readyc) }) + + return func(err error) { + // If connected to a black hole endpoint or a killed server, the gRPC ping + // timeout will induce a network I/O error, and retrying until success; + // finding healthy endpoint on retry could take several timeouts and redials. + // To avoid wasting retries, gray-list unhealthy endpoints. + b.hostPortError(addr.Addr, err) + + b.mu.Lock() + b.upc = make(chan struct{}) + close(b.downc) + b.pinAddr = "" + b.mu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error()) + } + } +} + +func (b *healthBalancer) mayPin(addr grpc.Address) bool { + if b.endpoint(addr.Addr) == "" { // stale host:port + return false + } + + b.unhealthyMu.RLock() + unhealthyCnt := len(b.unhealthyHostPorts) + failedTime, bad := b.unhealthyHostPorts[addr.Addr] + b.unhealthyMu.RUnlock() + + b.mu.RLock() + skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt + b.mu.RUnlock() + if skip || !bad { + return true + } + + // prevent isolated member's endpoint from being infinitely retried, as follows: + // 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm + // 2. balancer 'Up' unpins with grpc: failed with network I/O error + // 3. 
grpc-healthcheck still SERVING, thus retry to pin + // instead, return before grpc-healthcheck if failed within healthcheck timeout + if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout) + } + return false + } + + if ok, _ := b.healthCheck(addr.Addr); ok { + b.removeUnhealthy(addr.Addr, "health check success") + return true + } + + b.hostPortError(addr.Addr, errors.New("health check failed")) + return false +} + +func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { + var ( + addr string + closed bool + ) + + // If opts.BlockingWait is false (for fail-fast RPCs), it should return + // an address it has notified via Notify immediately instead of blocking. + if !opts.BlockingWait { + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr == "" { + return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable + } + return grpc.Address{Addr: addr}, func() {}, nil + } + + for { + b.mu.RLock() + ch := b.upc + b.mu.RUnlock() + select { + case <-ch: + case <-b.donec: + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + case <-ctx.Done(): + return grpc.Address{Addr: ""}, nil, ctx.Err() + } + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed. + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr != "" { + break + } + } + return grpc.Address{Addr: addr}, func() {}, nil +} + +func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } + +func (b *healthBalancer) Close() error { + b.mu.Lock() + // In case gRPC calls close twice. TODO: remove the checking + // when we are sure that gRPC wont call close twice. + if b.closed { + b.mu.Unlock() + <-b.donec + return nil + } + b.closed = true + b.stopOnce.Do(func() { close(b.stopc) }) + b.pinAddr = "" + + // In the case of following scenario: + // 1. upc is not closed; no pinned address + // 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks + // 3. client.conn.Close() calls balancer.Close(); closed = true + // 4. 
for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled + // we must close upc so Get() exits from blocking on upc + select { + case <-b.upc: + default: + // terminate all waiting Get()s + close(b.upc) + } + + b.mu.Unlock() + b.wg.Wait() + + // wait for updateNotifyLoop to finish + <-b.donec + close(b.notifyCh) + + return nil +} + +func grpcHealthCheck(client *Client, ep string) (bool, error) { + conn, err := client.dial(ep) + if err != nil { + return false, err + } + defer conn.Close() + cli := healthpb.NewHealthClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{}) + cancel() + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable { + if s.Message() == unknownService { // etcd < v3.3.0 + return true, nil + } + } + return false, err + } + return resp.Status == healthpb.HealthCheckResponse_SERVING, nil +} + +func hasAddr(addrs []grpc.Address, targetAddr string) bool { + for _, addr := range addrs { + if targetAddr == addr.Addr { + return true + } + } + return false +} + +func getHost(ep string) string { + url, uerr := url.Parse(ep) + if uerr != nil || !strings.Contains(ep, "://") { + return ep + } + return url.Host +} + +func eps2addrs(eps []string) []grpc.Address { + addrs := make([]grpc.Address, len(eps)) + for i := range eps { + addrs[i].Addr = getHost(eps[i]) + } + return addrs +} + +func getHostPort2ep(eps []string) map[string]string { + hm := make(map[string]string, len(eps)) + for i := range eps { + _, host, _ := parseEndpoint(eps[i]) + hm[host] = eps[i] + } + return hm +} diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go new file mode 100644 index 000000000000..6289605c8e05 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -0,0 +1,176 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type ( + CompactResponse pb.CompactionResponse + PutResponse pb.PutResponse + GetResponse pb.RangeResponse + DeleteResponse pb.DeleteRangeResponse + TxnResponse pb.TxnResponse +) + +type KV interface { + // Put puts a key-value pair into etcd. + // Note that key,value can be plain bytes array and string is + // an immutable representation of that bytes array. + // To get a string of bytes, do string([]byte{0x10, 0x20}). + Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) + + // Get retrieves keys. + // By default, Get will return the value for "key", if any. + // When passed WithRange(end), Get will return the keys in the range [key, end). + // When passed WithFromKey(), Get returns keys greater than or equal to key. 
+ // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision; + // if the required revision is compacted, the request will fail with ErrCompacted . + // When passed WithLimit(limit), the number of returned keys is bounded by limit. + // When passed WithSort(), the keys will be sorted. + Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) + + // Delete deletes a key, or optionally using WithRange(end), [key, end). + Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) + + // Compact compacts etcd KV history before the given rev. + Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) + + // Do applies a single Op on KV without a transaction. + // Do is useful when creating arbitrary operations to be issued at a + // later time; the user can range over the operations, calling Do to + // execute them. Get/Put/Delete, on the other hand, are best suited + // for when the operation should be issued at the time of declaration. + Do(ctx context.Context, op Op) (OpResponse, error) + + // Txn creates a transaction. + Txn(ctx context.Context) Txn +} + +type OpResponse struct { + put *PutResponse + get *GetResponse + del *DeleteResponse + txn *TxnResponse +} + +func (op OpResponse) Put() *PutResponse { return op.put } +func (op OpResponse) Get() *GetResponse { return op.get } +func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } + +func (resp *PutResponse) OpResponse() OpResponse { + return OpResponse{put: resp} +} +func (resp *GetResponse) OpResponse() OpResponse { + return OpResponse{get: resp} +} +func (resp *DeleteResponse) OpResponse() OpResponse { + return OpResponse{del: resp} +} +func (resp *TxnResponse) OpResponse() OpResponse { + return OpResponse{txn: resp} +} + +type kv struct { + remote pb.KVClient + callOpts []grpc.CallOption +} + +func NewKV(c *Client) KV { + api := &kv{remote: RetryKVClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { + api := &kv{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { + r, err := kv.Do(ctx, OpPut(key, val, opts...)) + return r.put, toErr(ctx, err) +} + +func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { + r, err := kv.Do(ctx, OpGet(key, opts...)) + return r.get, toErr(ctx, err) +} + +func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { + r, err := kv.Do(ctx, OpDelete(key, opts...)) + return r.del, toErr(ctx, err) +} + +func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { + resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*CompactResponse)(resp), err +} + +func (kv *kv) Txn(ctx context.Context) Txn { + return &txn{ + kv: kv, + ctx: ctx, + callOpts: kv.callOpts, + } +} + +func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { + var err error + switch op.t { + case tRange: + var resp *pb.RangeResponse + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) 
+ if err == nil { + return OpResponse{get: (*GetResponse)(resp)}, nil + } + case tPut: + var resp *pb.PutResponse + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + resp, err = kv.remote.Put(ctx, r, kv.callOpts...) + if err == nil { + return OpResponse{put: (*PutResponse)(resp)}, nil + } + case tDeleteRange: + var resp *pb.DeleteRangeResponse + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) + if err == nil { + return OpResponse{del: (*DeleteResponse)(resp)}, nil + } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) + if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } + default: + panic("Unknown op") + } + return OpResponse{}, toErr(ctx, err) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go new file mode 100644 index 000000000000..e74e1d6b549f --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/lease.go @@ -0,0 +1,545 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "sync" + "time" + + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +type ( + LeaseRevokeResponse pb.LeaseRevokeResponse + LeaseID int64 +) + +// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. +type LeaseGrantResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 + Error string +} + +// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. +type LeaseKeepAliveResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 +} + +// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. +type LeaseTimeToLiveResponse struct { + *pb.ResponseHeader + ID LeaseID `json:"id"` + + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + TTL int64 `json:"ttl"` + + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `json:"granted-ttl"` + + // Keys is the list of keys attached to this lease. + Keys [][]byte `json:"keys"` +} + +// LeaseStatus represents a lease status. +type LeaseStatus struct { + ID LeaseID `json:"id"` + // TODO: TTL int64 +} + +const ( + // defaultTTL is the assumed lease TTL used for the first keepalive + // deadline before the actual TTL is known to the client. + defaultTTL = 5 * time.Second + // a small buffer to store unsent lease responses. + leaseResponseChSize = 16 + // NoLease is a lease ID for the absence of a lease. 
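// A minimal usage sketch of the KV interface above, from an application's
// point of view; the endpoint and key names are illustrative assumptions,
// not part of the vendored code.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Put then Get through the KV interface embedded in *clientv3.Client.
	if _, err = cli.Put(ctx, "foo", "bar"); err != nil {
		log.Fatal(err)
	}
	resp, err := cli.Get(ctx, "foo")
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}

	// The same read issued as an Op through Do, unpacked with the
	// OpResponse accessors defined above.
	opResp, err := cli.Do(ctx, clientv3.OpGet("foo"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("kvs:", len(opResp.Get().Kvs))
}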
+ NoLease LeaseID = 0 + + // retryConnWait is how long to wait before retrying request due to an error + retryConnWait = 500 * time.Millisecond +) + +// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. +// +// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. +type ErrKeepAliveHalted struct { + Reason error +} + +func (e ErrKeepAliveHalted) Error() string { + s := "etcdclient: leases keep alive halted" + if e.Reason != nil { + s += ": " + e.Reason.Error() + } + return s +} + +type Lease interface { + // Grant creates a new lease. + Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) + + // Revoke revokes the given lease. + Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) + + // TimeToLive retrieves the lease information of the given lease ID. + TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) + + // KeepAlive keeps the given lease alive forever. + KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) + + // KeepAliveOnce renews the lease once. In most of the cases, KeepAlive + // should be used instead of KeepAliveOnce. + KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) + + // Close releases all resources Lease keeps for efficient communication + // with the etcd server. + Close() error +} + +type lessor struct { + mu sync.Mutex // guards all fields + + // donec is closed and loopErr is set when recvKeepAliveLoop stops + donec chan struct{} + loopErr error + + remote pb.LeaseClient + + stream pb.Lease_LeaseKeepAliveClient + streamCancel context.CancelFunc + + stopCtx context.Context + stopCancel context.CancelFunc + + keepAlives map[LeaseID]*keepAlive + + // firstKeepAliveTimeout is the timeout for the first keepalive request + // before the actual TTL is known to the lease client + firstKeepAliveTimeout time.Duration + + // firstKeepAliveOnce ensures stream starts after first KeepAlive call. + firstKeepAliveOnce sync.Once + + callOpts []grpc.CallOption +} + +// keepAlive multiplexes a keepalive for a lease over multiple channels +type keepAlive struct { + chs []chan<- *LeaseKeepAliveResponse + ctxs []context.Context + // deadline is the time the keep alive channels close if no response + deadline time.Time + // nextKeepAlive is when to send the next keep alive message + nextKeepAlive time.Time + // donec is closed on lease revoke, expiration, or cancel. + donec chan struct{} +} + +func NewLease(c *Client) Lease { + return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) +} + +func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { + l := &lessor{ + donec: make(chan struct{}), + keepAlives: make(map[LeaseID]*keepAlive), + remote: remote, + firstKeepAliveTimeout: keepAliveTimeout, + } + if l.firstKeepAliveTimeout == time.Second { + l.firstKeepAliveTimeout = defaultTTL + } + if c != nil { + l.callOpts = c.callOpts + } + reqLeaderCtx := WithRequireLeader(context.Background()) + l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) + return l +} + +func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) 
+	if err == nil {
+		gresp := &LeaseGrantResponse{
+			ResponseHeader: resp.GetHeader(),
+			ID:             LeaseID(resp.ID),
+			TTL:            resp.TTL,
+			Error:          resp.Error,
+		}
+		return gresp, nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
+	r := &pb.LeaseRevokeRequest{ID: int64(id)}
+	resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
+	if err == nil {
+		return (*LeaseRevokeResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
+	r := toLeaseTimeToLiveRequest(id, opts...)
+	resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
+	if err == nil {
+		gresp := &LeaseTimeToLiveResponse{
+			ResponseHeader: resp.GetHeader(),
+			ID:             LeaseID(resp.ID),
+			TTL:            resp.TTL,
+			GrantedTTL:     resp.GrantedTTL,
+			Keys:           resp.Keys,
+		}
+		return gresp, nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
+	ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
+
+	l.mu.Lock()
+	// ensure that recvKeepAliveLoop is still running
+	select {
+	case <-l.donec:
+		err := l.loopErr
+		l.mu.Unlock()
+		close(ch)
+		return ch, ErrKeepAliveHalted{Reason: err}
+	default:
+	}
+	ka, ok := l.keepAlives[id]
+	if !ok {
+		// create fresh keep alive
+		ka = &keepAlive{
+			chs:           []chan<- *LeaseKeepAliveResponse{ch},
+			ctxs:          []context.Context{ctx},
+			deadline:      time.Now().Add(l.firstKeepAliveTimeout),
+			nextKeepAlive: time.Now(),
+			donec:         make(chan struct{}),
+		}
+		l.keepAlives[id] = ka
+	} else {
+		// add channel and context to existing keep alive
+		ka.ctxs = append(ka.ctxs, ctx)
+		ka.chs = append(ka.chs, ch)
+	}
+	l.mu.Unlock()
+
+	go l.keepAliveCtxCloser(id, ctx, ka.donec)
+	l.firstKeepAliveOnce.Do(func() {
+		go l.recvKeepAliveLoop()
+		go l.deadlineLoop()
+	})
+
+	return ch, nil
+}
+
+func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+	for {
+		resp, err := l.keepAliveOnce(ctx, id)
+		if err == nil {
+			if resp.TTL <= 0 {
+				err = rpctypes.ErrLeaseNotFound
+			}
+			return resp, err
+		}
+		if isHaltErr(ctx, err) {
+			return nil, toErr(ctx, err)
+		}
+	}
+}
+
+func (l *lessor) Close() error {
+	l.stopCancel()
+	// close for synchronous teardown if stream goroutines never launched
+	l.firstKeepAliveOnce.Do(func() { close(l.donec) })
+	<-l.donec
+	return nil
+}
+
+func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
+	select {
+	case <-donec:
+		return
+	case <-l.donec:
+		return
+	case <-ctx.Done():
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	ka, ok := l.keepAlives[id]
+	if !ok {
+		return
+	}
+
+	// close channel and remove context if still associated with keep alive
+	for i, c := range ka.ctxs {
+		if c == ctx {
+			close(ka.chs[i])
+			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
+			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
+			break
+		}
+	}
+	// remove if no more listeners remain
+	if len(ka.chs) == 0 {
+		delete(l.keepAlives, id)
+	}
+}
+
+// closeRequireLeader scans keepAlives for contexts that require a leader
+// and closes the associated channels.
+func (l *lessor) closeRequireLeader() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	for _, ka := range l.keepAlives {
+		reqIdxs := 0
+		// find all required leader channels, close, mark as nil
+		for i, ctx := range ka.ctxs {
+			md, ok := metadata.FromOutgoingContext(ctx)
+			if !ok {
+				continue
+			}
+			ks := md[rpctypes.MetadataRequireLeaderKey]
+			if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
+				continue
+			}
+			close(ka.chs[i])
+			ka.chs[i] = nil
+			reqIdxs++
+		}
+		if reqIdxs == 0 {
+			continue
+		}
+		// remove all channels that required a leader from keepalive
+		newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
+		newCtxs := make([]context.Context, len(newChs))
+		newIdx := 0
+		for i := range ka.chs {
+			if ka.chs[i] == nil {
+				continue
+			}
+			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
+			newIdx++
+		}
+		ka.chs, ka.ctxs = newChs, newCtxs
+	}
+}
+
+func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+
+	err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+
+	resp, rerr := stream.Recv()
+	if rerr != nil {
+		return nil, toErr(ctx, rerr)
+	}
+
+	karesp := &LeaseKeepAliveResponse{
+		ResponseHeader: resp.GetHeader(),
+		ID:             LeaseID(resp.ID),
+		TTL:            resp.TTL,
+	}
+	return karesp, nil
+}
+
+func (l *lessor) recvKeepAliveLoop() (gerr error) {
+	defer func() {
+		l.mu.Lock()
+		close(l.donec)
+		l.loopErr = gerr
+		for _, ka := range l.keepAlives {
+			ka.close()
+		}
+		l.keepAlives = make(map[LeaseID]*keepAlive)
+		l.mu.Unlock()
+	}()
+
+	for {
+		stream, err := l.resetRecv()
+		if err != nil {
+			if canceledByCaller(l.stopCtx, err) {
+				return err
+			}
+		} else {
+			for {
+				resp, err := stream.Recv()
+				if err != nil {
+					if canceledByCaller(l.stopCtx, err) {
+						return err
+					}
+
+					if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
+						l.closeRequireLeader()
+					}
+					break
+				}
+
+				l.recvKeepAlive(resp)
+			}
+		}
+
+		select {
+		case <-time.After(retryConnWait):
+			continue
+		case <-l.stopCtx.Done():
+			return l.stopCtx.Err()
+		}
+	}
+}
+
+// resetRecv opens a new lease stream and starts sending keep alive requests.
+func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
+	sctx, cancel := context.WithCancel(l.stopCtx)
+	stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
+ if err != nil { + cancel() + return nil, err + } + + l.mu.Lock() + defer l.mu.Unlock() + if l.stream != nil && l.streamCancel != nil { + l.streamCancel() + } + + l.streamCancel = cancel + l.stream = stream + + go l.sendKeepAliveLoop(stream) + return stream, nil +} + +// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse +func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { + karesp := &LeaseKeepAliveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + } + + l.mu.Lock() + defer l.mu.Unlock() + + ka, ok := l.keepAlives[karesp.ID] + if !ok { + return + } + + if karesp.TTL <= 0 { + // lease expired; close all keep alive channels + delete(l.keepAlives, karesp.ID) + ka.close() + return + } + + // send update to all channels + nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0) + ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second) + for _, ch := range ka.chs { + select { + case ch <- karesp: + ka.nextKeepAlive = nextKeepAlive + default: + } + } +} + +// deadlineLoop reaps any keep alive channels that have not received a response +// within the lease TTL +func (l *lessor) deadlineLoop() { + for { + select { + case <-time.After(time.Second): + case <-l.donec: + return + } + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.deadline.Before(now) { + // waited too long for response; lease may be expired + ka.close() + delete(l.keepAlives, id) + } + } + l.mu.Unlock() + } +} + +// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. +func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { + for { + var tosend []LeaseID + + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.nextKeepAlive.Before(now) { + tosend = append(tosend, id) + } + } + l.mu.Unlock() + + for _, id := range tosend { + r := &pb.LeaseKeepAliveRequest{ID: int64(id)} + if err := stream.Send(r); err != nil { + // TODO do something with this error? + return + } + } + + select { + case <-time.After(500 * time.Millisecond): + case <-stream.Context().Done(): + return + case <-l.donec: + return + case <-l.stopCtx.Done(): + return + } + } +} + +func (ka *keepAlive) close() { + close(ka.donec) + for _, ch := range ka.chs { + close(ch) + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go new file mode 100644 index 000000000000..012abdbce639 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/logger.go @@ -0,0 +1,96 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "io/ioutil" + "sync" + + "google.golang.org/grpc/grpclog" +) + +// Logger is the logger used by client library. +// It implements grpclog.LoggerV2 interface. 
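// A short sketch of the keepalive flow implemented above; the endpoint,
// key, and TTL are illustrative assumptions. Grant creates the lease,
// KeepAlive returns a channel fed by recvKeepAlive, and sendKeepAliveLoop
// renews roughly every TTL/3 seconds.
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	grant, err := cli.Grant(ctx, 10) // 10-second TTL
	if err != nil {
		log.Fatal(err)
	}
	// Attach a key to the lease so it disappears when the lease expires.
	if _, err = cli.Put(ctx, "service/instance", "addr", clientv3.WithLease(grant.ID)); err != nil {
		log.Fatal(err)
	}
	ch, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		log.Fatal(err)
	}
	// The channel closes on revoke, expiration, or cancellation (see
	// keepAlive.donec above); each response carries the remaining TTL.
	for ka := range ch {
		log.Printf("lease %x alive, ttl=%d", int64(ka.ID), ka.TTL)
	}
}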
+type Logger grpclog.LoggerV2 + +var ( + logger settableLogger +) + +type settableLogger struct { + l grpclog.LoggerV2 + mu sync.RWMutex +} + +func init() { + // disable client side logs by default + logger.mu.Lock() + logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard) + + // logger has to override the grpclog at initialization so that + // any changes to the grpclog go through logger with locking + // instead of through SetLogger + // + // now updates only happen through settableLogger.set + grpclog.SetLoggerV2(&logger) + logger.mu.Unlock() +} + +// SetLogger sets client-side Logger. By default, logs are disabled. +func SetLogger(l Logger) { + logger.set(l) +} + +// GetLogger returns the current logger. +func GetLogger() Logger { + return logger.get() +} + +func (s *settableLogger) set(l Logger) { + s.mu.Lock() + logger.l = l + grpclog.SetLoggerV2(&logger) + s.mu.Unlock() +} + +func (s *settableLogger) get() Logger { + s.mu.RLock() + l := logger.l + s.mu.RUnlock() + return l +} + +// implement the grpclog.LoggerV2 interface + +func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } +func (s *settableLogger) Warningf(format string, args ...interface{}) { + s.get().Warningf(format, args...) +} +func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } +func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } +func (s *settableLogger) Errorf(format string, args ...interface{}) { + s.get().Errorf(format, args...) +} +func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } +func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } +func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } +func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } +func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) V(l int) bool { return s.get().V(l) } diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go new file mode 100644 index 000000000000..67b928fcfb3a --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/maintenance.go @@ -0,0 +1,187 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
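// A small sketch of enabling the client-side logs from logger.go above,
// which init() discards by default; routing all three severities to
// os.Stderr is just one possible choice.
package main

import (
	"os"

	"github.com/coreos/etcd/clientv3"
	"google.golang.org/grpc/grpclog"
)

func main() {
	// SetLogger re-registers the settable logger with grpclog, so gRPC's
	// own messages flow through the same writers.
	clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
}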
+
+package clientv3
+
+import (
+	"io"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+type (
+	DefragmentResponse pb.DefragmentResponse
+	AlarmResponse      pb.AlarmResponse
+	AlarmMember        pb.AlarmMember
+	StatusResponse     pb.StatusResponse
+)
+
+type Maintenance interface {
+	// AlarmList gets all active alarms.
+	AlarmList(ctx context.Context) (*AlarmResponse, error)
+
+	// AlarmDisarm disarms a given alarm.
+	AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
+
+	// Defragment releases wasted space from internal fragmentation on a given etcd member.
+	// Defragment is only needed when a large number of keys have been deleted and the
+	// user wants to reclaim the resources.
+	// Defragment is an expensive operation. Users should avoid defragmenting multiple members
+	// at the same time.
+	// To defragment multiple members in the cluster, the user needs to call Defragment multiple
+	// times with different endpoints.
+	Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
+
+	// Status gets the status of the endpoint.
+	Status(ctx context.Context, endpoint string) (*StatusResponse, error)
+
+	// Snapshot provides a reader for a point-in-time snapshot of etcd.
+	Snapshot(ctx context.Context) (io.ReadCloser, error)
+}
+
+type maintenance struct {
+	dial     func(endpoint string) (pb.MaintenanceClient, func(), error)
+	remote   pb.MaintenanceClient
+	callOpts []grpc.CallOption
+}
+
+func NewMaintenance(c *Client) Maintenance {
+	api := &maintenance{
+		dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
+			conn, err := c.dial(endpoint)
+			if err != nil {
+				return nil, nil, err
+			}
+			cancel := func() { conn.Close() }
+			return RetryMaintenanceClient(c, conn), cancel, nil
+		},
+		remote: RetryMaintenanceClient(c, c.conn),
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
+	api := &maintenance{
+		dial: func(string) (pb.MaintenanceClient, func(), error) {
+			return remote, func() {}, nil
+		},
+		remote: remote,
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
+	req := &pb.AlarmRequest{
+		Action:   pb.AlarmRequest_GET,
+		MemberID: 0,                 // all
+		Alarm:    pb.AlarmType_NONE, // all
+	}
+	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+	if err == nil {
+		return (*AlarmResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
+}
+
+func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
+	req := &pb.AlarmRequest{
+		Action:   pb.AlarmRequest_DEACTIVATE,
+		MemberID: am.MemberID,
+		Alarm:    am.Alarm,
+	}
+
+	if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
+		ar, err := m.AlarmList(ctx)
+		if err != nil {
+			return nil, toErr(ctx, err)
+		}
+		ret := AlarmResponse{}
+		for _, am := range ar.Alarms {
+			dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
+			if derr != nil {
+				return nil, toErr(ctx, derr)
+			}
+			ret.Alarms = append(ret.Alarms, dresp.Alarms...)
+		}
+		return &ret, nil
+	}
+
+	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+ if err == nil { + return (*AlarmResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*DefragmentResponse)(resp), nil +} + +func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*StatusResponse)(resp), nil +} + +func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + + pr, pw := io.Pipe() + go func() { + for { + resp, err := ss.Recv() + if err != nil { + pw.CloseWithError(err) + return + } + if resp == nil && err == nil { + break + } + if _, werr := pw.Write(resp.Blob); werr != nil { + pw.CloseWithError(werr) + return + } + } + pw.Close() + }() + return pr, nil +} diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go new file mode 100644 index 000000000000..e18d28662c46 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/op.go @@ -0,0 +1,511 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + +type opType int + +const ( + // A default Op has opType 0, which is invalid. + tRange opType = iota + 1 + tPut + tDeleteRange + tTxn +) + +var ( + noPrefixEnd = []byte{0} +) + +// Op represents an Operation that kv can execute. +type Op struct { + t opType + key []byte + end []byte + + // for range + limit int64 + sort *SortOption + serializable bool + keysOnly bool + countOnly bool + minModRev int64 + maxModRev int64 + minCreateRev int64 + maxCreateRev int64 + + // for range, watch + rev int64 + + // for watch, put, delete + prevKV bool + + // for put + ignoreValue bool + ignoreLease bool + + // progressNotify is for progress updates. + progressNotify bool + // createdNotify is for created event + createdNotify bool + // filters for watchers + filterPut bool + filterDelete bool + + // for put + val []byte + leaseID LeaseID + + // txn + cmps []Cmp + thenOps []Op + elseOps []Op +} + +// accessors / mutators + +func (op Op) IsTxn() bool { return op.t == tTxn } +func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps } + +// KeyBytes returns the byte slice holding the Op's key. +func (op Op) KeyBytes() []byte { return op.key } + +// WithKeyBytes sets the byte slice for the Op's key. 
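// A brief sketch of the per-endpoint Maintenance calls from maintenance.go
// above; the endpoint string and snapshot file path are illustrative
// assumptions.
package main

import (
	"io"
	"log"
	"os"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	// Status (like Defragment) dials the named endpoint directly, via the
	// dial func installed by NewMaintenance.
	if _, err := cli.Status(ctx, "localhost:2379"); err != nil {
		log.Fatal(err)
	}
	// Snapshot streams the backend through an io.Pipe to the caller.
	rc, err := cli.Snapshot(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	f, err := os.Create("snapshot.db")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}
}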
+func (op *Op) WithKeyBytes(key []byte) { op.key = key }
+
+// RangeBytes returns the byte slice holding the Op's range end, if any.
+func (op Op) RangeBytes() []byte { return op.end }
+
+// Rev returns the requested revision, if any.
+func (op Op) Rev() int64 { return op.rev }
+
+// IsPut returns true iff the operation is a Put.
+func (op Op) IsPut() bool { return op.t == tPut }
+
+// IsGet returns true iff the operation is a Get.
+func (op Op) IsGet() bool { return op.t == tRange }
+
+// IsDelete returns true iff the operation is a Delete.
+func (op Op) IsDelete() bool { return op.t == tDeleteRange }
+
+// IsSerializable returns true if the serializable field is true.
+func (op Op) IsSerializable() bool { return op.serializable == true }
+
+// IsKeysOnly returns whether keysOnly is set.
+func (op Op) IsKeysOnly() bool { return op.keysOnly == true }
+
+// IsCountOnly returns whether countOnly is set.
+func (op Op) IsCountOnly() bool { return op.countOnly == true }
+
+// MinModRev returns the operation's minimum modify revision.
+func (op Op) MinModRev() int64 { return op.minModRev }
+
+// MaxModRev returns the operation's maximum modify revision.
+func (op Op) MaxModRev() int64 { return op.maxModRev }
+
+// MinCreateRev returns the operation's minimum create revision.
+func (op Op) MinCreateRev() int64 { return op.minCreateRev }
+
+// MaxCreateRev returns the operation's maximum create revision.
+func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
+
+// WithRangeBytes sets the byte slice for the Op's range end.
+func (op *Op) WithRangeBytes(end []byte) { op.end = end }
+
+// ValueBytes returns the byte slice holding the Op's value, if any.
+func (op Op) ValueBytes() []byte { return op.val }
+
+// WithValueBytes sets the byte slice for the Op's value.
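// A tiny sketch of the Op accessors above: Ops can be built and inspected
// without being issued; the keys here are arbitrary.
package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

func describeOp(op clientv3.Op) string {
	switch {
	case op.IsGet():
		return fmt.Sprintf("get key=%q end=%q", op.KeyBytes(), op.RangeBytes())
	case op.IsPut():
		return fmt.Sprintf("put key=%q val=%q", op.KeyBytes(), op.ValueBytes())
	case op.IsDelete():
		return fmt.Sprintf("del key=%q end=%q", op.KeyBytes(), op.RangeBytes())
	default:
		return "txn"
	}
}

func main() {
	fmt.Println(describeOp(clientv3.OpPut("foo", "bar")))
	// WithPrefix (defined below) fills in the range end for "foo".
	fmt.Println(describeOp(clientv3.OpGet("foo", clientv3.WithPrefix())))
}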
+func (op *Op) WithValueBytes(v []byte) { op.val = v } + +func (op Op) toRangeRequest() *pb.RangeRequest { + if op.t != tRange { + panic("op.t != tRange") + } + r := &pb.RangeRequest{ + Key: op.key, + RangeEnd: op.end, + Limit: op.limit, + Revision: op.rev, + Serializable: op.serializable, + KeysOnly: op.keysOnly, + CountOnly: op.countOnly, + MinModRevision: op.minModRev, + MaxModRevision: op.maxModRev, + MinCreateRevision: op.minCreateRev, + MaxCreateRevision: op.maxCreateRev, + } + if op.sort != nil { + r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) + r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) + } + return r +} + +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + +func (op Op) toRequestOp() *pb.RequestOp { + switch op.t { + case tRange: + return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} + case tPut: + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} + case tDeleteRange: + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} + default: + panic("Unknown Op") + } +} + +func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } + return op.t != tRange +} + +func OpGet(key string, opts ...OpOption) Op { + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + return ret +} + +func OpDelete(key string, opts ...OpOption) Op { + ret := Op{t: tDeleteRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in delete") + case ret.limit != 0: + panic("unexpected limit in delete") + case ret.rev != 0: + panic("unexpected revision in delete") + case ret.sort != nil: + panic("unexpected sort in delete") + case ret.serializable: + panic("unexpected serializable in delete") + case ret.countOnly: + panic("unexpected countOnly in delete") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in delete") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in delete") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in delete") + case ret.createdNotify: + panic("unexpected createdNotify in delete") + } + return ret +} + +func OpPut(key, val string, opts ...OpOption) Op { + ret := Op{t: tPut, key: []byte(key), val: []byte(val)} + ret.applyOpts(opts) + switch { + case ret.end != nil: + panic("unexpected range in put") + case ret.limit != 0: + panic("unexpected limit in put") + case ret.rev != 0: + panic("unexpected revision in put") + case ret.sort != nil: + panic("unexpected sort in put") + case ret.serializable: + panic("unexpected serializable in put") + case ret.countOnly: + panic("unexpected countOnly in put") + 
case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in put") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in put") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in put") + case ret.createdNotify: + panic("unexpected createdNotify in put") + } + return ret +} + +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + +func opWatch(key string, opts ...OpOption) Op { + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in watch") + case ret.limit != 0: + panic("unexpected limit in watch") + case ret.sort != nil: + panic("unexpected sort in watch") + case ret.serializable: + panic("unexpected serializable in watch") + case ret.countOnly: + panic("unexpected countOnly in watch") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in watch") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in watch") + } + return ret +} + +func (op *Op) applyOpts(opts []OpOption) { + for _, opt := range opts { + opt(op) + } +} + +// OpOption configures Operations like Get, Put, Delete. +type OpOption func(*Op) + +// WithLease attaches a lease ID to a key in 'Put' request. +func WithLease(leaseID LeaseID) OpOption { + return func(op *Op) { op.leaseID = leaseID } +} + +// WithLimit limits the number of results to return from 'Get' request. +// If WithLimit is given a 0 limit, it is treated as no limit. +func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } + +// WithRev specifies the store revision for 'Get' request. +// Or the start revision of 'Watch' request. +func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } + +// WithSort specifies the ordering in 'Get' request. It requires +// 'WithRange' and/or 'WithPrefix' to be specified too. +// 'target' specifies the target to sort by: key, version, revisions, value. +// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'. +func WithSort(target SortTarget, order SortOrder) OpOption { + return func(op *Op) { + if target == SortByKey && order == SortAscend { + // If order != SortNone, server fetches the entire key-space, + // and then applies the sort and limit, if provided. + // Since by default the server returns results sorted by keys + // in lexicographically ascending order, the client should ignore + // SortOrder if the target is SortByKey. + order = SortNone + } + op.sort = &SortOption{target, order} + } +} + +// GetPrefixRangeEnd gets the range end of the prefix. +// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. +func GetPrefixRangeEnd(prefix string) string { + return string(getPrefix([]byte(prefix))) +} + +func getPrefix(key []byte) []byte { + end := make([]byte, len(key)) + copy(end, key) + for i := len(end) - 1; i >= 0; i-- { + if end[i] < 0xff { + end[i] = end[i] + 1 + end = end[:i+1] + return end + } + } + // next prefix does not exist (e.g., 0xffff); + // default to WithFromKey policy + return noPrefixEnd +} + +// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate +// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())' +// can return 'foo1', 'foo2', and so on. 
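// A worked example of getPrefix above (a complete file, assuming only the
// clientv3 import): the range end increments the last byte below 0xff and
// truncates after it.
package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	fmt.Println(clientv3.GetPrefixRangeEnd("foo"))    // "fop": 'o' becomes 'p'
	fmt.Println(clientv3.GetPrefixRangeEnd("ab\xff")) // "ac": the trailing 0xff carries left
	// An all-0xff prefix has no next prefix; getPrefix returns noPrefixEnd
	// ([]byte{0}), matching the WithFromKey policy.
	fmt.Printf("%q\n", clientv3.GetPrefixRangeEnd("\xff")) // "\x00"
}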
+func WithPrefix() OpOption {
+	return func(op *Op) {
+		if len(op.key) == 0 {
+			op.key, op.end = []byte{0}, []byte{0}
+			return
+		}
+		op.end = getPrefix(op.key)
+	}
+}
+
+// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
+// For example, 'Get' requests with 'WithRange(end)' return
+// the keys in the range [key, end).
+// endKey must be lexicographically greater than the start key.
+func WithRange(endKey string) OpOption {
+	return func(op *Op) { op.end = []byte(endKey) }
+}
+
+// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
+// to be equal to or greater than the key in the argument.
+func WithFromKey() OpOption { return WithRange("\x00") }
+
+// WithSerializable makes the 'Get' request serializable. By default,
+// it is linearizable. Serializable requests are better for lower-latency
+// requirements.
+func WithSerializable() OpOption {
+	return func(op *Op) { op.serializable = true }
+}
+
+// WithKeysOnly makes the 'Get' request return only the keys; the
+// corresponding values will be omitted.
+func WithKeysOnly() OpOption {
+	return func(op *Op) { op.keysOnly = true }
+}
+
+// WithCountOnly makes the 'Get' request return only the count of keys.
+func WithCountOnly() OpOption {
+	return func(op *Op) { op.countOnly = true }
+}
+
+// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
+func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
+
+// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
+func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
+
+// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
+func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
+
+// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
+func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
+
+// WithFirstCreate gets the key with the oldest creation revision in the request range.
+func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
+
+// WithLastCreate gets the key with the latest creation revision in the request range.
+func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
+
+// WithFirstKey gets the lexically first key in the request range.
+func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
+
+// WithLastKey gets the lexically last key in the request range.
+func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
+
+// WithFirstRev gets the key with the oldest modification revision in the request range.
+func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
+
+// WithLastRev gets the key with the latest modification revision in the request range.
+func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
+
+// withTop gets the first key over the get's prefix given a sort order.
+func withTop(target SortTarget, order SortOrder) []OpOption {
+	return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
+}
+
+// WithProgressNotify makes the watch server send periodic progress updates
+// every 10 minutes when there are no incoming events.
+// Progress updates have zero events in WatchResponse.
+func WithProgressNotify() OpOption {
+	return func(op *Op) {
+		op.progressNotify = true
+	}
+}
+
+// WithCreatedNotify makes the watch server send the created event.
+func WithCreatedNotify() OpOption {
+	return func(op *Op) {
+		op.createdNotify = true
+	}
+}
+
+// WithFilterPut discards PUT events from the watcher.
+func WithFilterPut() OpOption {
+	return func(op *Op) { op.filterPut = true }
+}
+
+// WithFilterDelete discards DELETE events from the watcher.
+func WithFilterDelete() OpOption {
+	return func(op *Op) { op.filterDelete = true }
+}
+
+// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
+// nothing will be returned.
+func WithPrevKV() OpOption {
+	return func(op *Op) {
+		op.prevKV = true
+	}
+}
+
+// WithIgnoreValue updates the key using its current value.
+// This option cannot be combined with non-empty values.
+// Returns an error if the key does not exist.
+func WithIgnoreValue() OpOption {
+	return func(op *Op) {
+		op.ignoreValue = true
+	}
+}
+
+// WithIgnoreLease updates the key using its current lease.
+// This option cannot be combined with WithLease.
+// Returns an error if the key does not exist.
+func WithIgnoreLease() OpOption {
+	return func(op *Op) {
+		op.ignoreLease = true
+	}
+}
+
+// LeaseOp represents an Operation that lease can execute.
+type LeaseOp struct {
+	id LeaseID
+
+	// for TimeToLive
+	attachedKeys bool
+}
+
+// LeaseOption configures lease operations.
+type LeaseOption func(*LeaseOp)
+
+func (op *LeaseOp) applyOpts(opts []LeaseOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
+func WithAttachedKeys() LeaseOption {
+	return func(op *LeaseOp) { op.attachedKeys = true }
+}
+
+func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
+	ret := &LeaseOp{id: id}
+	ret.applyOpts(opts)
+	return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go
new file mode 100644
index 000000000000..23eea9367ff7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import "golang.org/x/net/context"
+
+// TODO: remove this when "FailFast=false" is fixed.
+// See https://github.com/grpc/grpc-go/issues/1532.
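// A condensed sketch of how the retry wrappers in retry.go (below) drive
// readyWait: wait for a ready connection, run the RPC, and only loop again
// when the error classifier says the call is safe to repeat. The function
// and parameter names here are illustrative, not part of the vendored code.
func retryUntilStop(rpcCtx, clientCtx context.Context, ready <-chan struct{},
	call func(context.Context) error, isStop func(error) bool) error {
	for {
		// Block until the balancer reports a usable connection, or until
		// either the RPC's or the client's context is done.
		if err := readyWait(rpcCtx, clientCtx, ready); err != nil {
			return err
		}
		err := call(rpcCtx)
		if err == nil || isStop(err) {
			return err
		}
		// Otherwise loop and retry; in newRetryWrapper this is where the
		// balancer is nudged toward another endpoint first.
	}
}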
+func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error { + select { + case <-ready: + return nil + case <-rpcCtx.Done(): + return rpcCtx.Err() + case <-clientCtx.Done(): + return clientCtx.Err() + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go new file mode 100644 index 000000000000..c95b2cad7c4f --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/retry.go @@ -0,0 +1,471 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type rpcFunc func(ctx context.Context) error +type retryRPCFunc func(context.Context, rpcFunc) error +type retryStopErrFunc func(error) bool + +func isRepeatableStopError(err error) bool { + eErr := rpctypes.Error(err) + // always stop retry on etcd errors + if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { + return true + } + // only retry if unavailable + ev, _ := status.FromError(err) + return ev.Code() != codes.Unavailable +} + +func isNonRepeatableStopError(err error) bool { + ev, _ := status.FromError(err) + if ev.Code() != codes.Unavailable { + return true + } + desc := rpctypes.ErrorDesc(err) + return desc != "there is no address available" && desc != "there is no connection available" +} + +func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc { + return func(rpcCtx context.Context, f rpcFunc) error { + for { + if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil { + return err + } + pinned := c.balancer.pinned() + err := f(rpcCtx) + if err == nil { + return nil + } + if logger.V(4) { + logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) + } + + if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) { + // mark this before endpoint switch is triggered + c.balancer.hostPortError(pinned, err) + c.balancer.next() + if logger.V(4) { + logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) + } + } + + if isStop(err) { + return err + } + } + } +} + +func (c *Client) newAuthRetryWrapper() retryRPCFunc { + return func(rpcCtx context.Context, f rpcFunc) error { + for { + pinned := c.balancer.pinned() + err := f(rpcCtx) + if err == nil { + return nil + } + if logger.V(4) { + logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) + } + // always stop retry on etcd errors other than invalid auth token + if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { + gterr := c.getToken(rpcCtx) + if gterr != nil { + if logger.V(4) { + logger.Infof("clientv3/auth-retry: 
cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) + } + return err // return the original error for simplicity + } + continue + } + return err + } + } +} + +// RetryKVClient implements a KVClient. +func RetryKVClient(c *Client) pb.KVClient { + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + conn := pb.NewKVClient(c.conn) + retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry} + retryAuthWrapper := c.newAuthRetryWrapper() + return &retryKVClient{ + &nonRepeatableKVClient{retryBasic, retryAuthWrapper}, + retryAuthWrapper} +} + +type retryKVClient struct { + *nonRepeatableKVClient + repeatableRetry retryRPCFunc +} + +func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { + err = rkv.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Range(rctx, in, opts...) + return err + }) + return resp, err +} + +type nonRepeatableKVClient struct { + kc pb.KVClient + nonRepeatableRetry retryRPCFunc +} + +func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Put(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.DeleteRange(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { + // TODO: repeatableRetry if read-only txn + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Txn(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Compact(rctx, in, opts...) + return err + }) + return resp, err +} + +type retryLeaseClient struct { + lc pb.LeaseClient + repeatableRetry retryRPCFunc +} + +// RetryLeaseClient implements a LeaseClient. +func RetryLeaseClient(c *Client) pb.LeaseClient { + retry := &retryLeaseClient{ + pb.NewLeaseClient(c.conn), + c.newRetryWrapper(isRepeatableStopError), + } + return &retryLeaseClient{retry, c.newAuthRetryWrapper()} +} + +func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseGrant(rctx, in, opts...) 
+ return err + }) + return resp, err + +} + +func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...) + return err + }) + return stream, err +} + +type retryClusterClient struct { + *nonRepeatableClusterClient + repeatableRetry retryRPCFunc +} + +// RetryClusterClient implements a ClusterClient. +func RetryClusterClient(c *Client) pb.ClusterClient { + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + cc := pb.NewClusterClient(c.conn) + return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry} +} + +func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { + err = rcc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberList(rctx, in, opts...) + return err + }) + return resp, err +} + +type nonRepeatableClusterClient struct { + cc pb.ClusterClient + nonRepeatableRetry retryRPCFunc +} + +func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberAdd(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberRemove(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberUpdate(rctx, in, opts...) + return err + }) + return resp, err +} + +// RetryMaintenanceClient implements a Maintenance. +func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + mc := pb.NewMaintenanceClient(conn) + return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry} +} + +type retryMaintenanceClient struct { + *nonRepeatableMaintenanceClient + repeatableRetry retryRPCFunc +} + +func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Alarm(rctx, in, opts...) 
+ return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Status(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Hash(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rmc.mc.Snapshot(rctx, in, opts...) + return err + }) + return stream, err +} + +type nonRepeatableMaintenanceClient struct { + mc pb.MaintenanceClient + nonRepeatableRetry retryRPCFunc +} + +func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { + err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Defragment(rctx, in, opts...) + return err + }) + return resp, err +} + +type retryAuthClient struct { + *nonRepeatableAuthClient + repeatableRetry retryRPCFunc +} + +// RetryAuthClient implements a AuthClient. +func RetryAuthClient(c *Client) pb.AuthClient { + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + ac := pb.NewAuthClient(c.conn) + return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry} +} + +func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserList(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGet(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGet(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleList(rctx, in, opts...) + return err + }) + return resp, err +} + +type nonRepeatableAuthClient struct { + ac pb.AuthClient + nonRepeatableRetry retryRPCFunc +} + +func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthEnable(rctx, in, opts...) 
+ return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthDisable(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserAdd(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserDelete(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserChangePassword(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGrantRole(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserRevokeRole(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleAdd(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleDelete(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...) 
+		return err
+	})
+	return resp, err
+}
+
+func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.Authenticate(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/sort.go b/vendor/github.com/coreos/etcd/clientv3/sort.go
new file mode 100644
index 000000000000..2bb9d9a13b78
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/sort.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+type SortTarget int
+type SortOrder int
+
+const (
+	SortNone SortOrder = iota
+	SortAscend
+	SortDescend
+)
+
+const (
+	SortByKey SortTarget = iota
+	SortByVersion
+	SortByCreateRevision
+	SortByModRevision
+	SortByValue
+)
+
+type SortOption struct {
+	Target SortTarget
+	Order  SortOrder
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go
new file mode 100644
index 000000000000..1a80c8ebaabe
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/txn.go
@@ -0,0 +1,151 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"sync"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+// Txn is the interface that wraps mini-transactions.
+//
+//	Txn(context.TODO()).If(
+//	 Compare(Value(k1), ">", v1),
+//	 Compare(Version(k1), "=", 2)
+//	).Then(
+//	 OpPut(k2,v2), OpPut(k3,v3)
+//	).Else(
+//	 OpPut(k4,v4), OpPut(k5,v5)
+//	).Commit()
+//
+type Txn interface {
+	// If takes a list of comparisons. If all comparisons passed in succeed,
+	// the operations passed into Then() will be executed; otherwise the
+	// operations passed into Else() will be executed.
+	If(cs ...Cmp) Txn
+
+	// Then takes a list of operations. The Ops list will be executed if the
+	// comparisons passed in If() succeed.
+	Then(ops ...Op) Txn
+
+	// Else takes a list of operations. The Ops list will be executed if the
+	// comparisons passed in If() fail.
+	Else(ops ...Op) Txn
+
+	// Commit tries to commit the transaction.
+ Commit() (*TxnResponse, error) +} + +type txn struct { + kv *kv + ctx context.Context + + mu sync.Mutex + cif bool + cthen bool + celse bool + + isWrite bool + + cmps []*pb.Compare + + sus []*pb.RequestOp + fas []*pb.RequestOp + + callOpts []grpc.CallOption +} + +func (txn *txn) If(cs ...Cmp) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.cif { + panic("cannot call If twice!") + } + + if txn.cthen { + panic("cannot call If after Then!") + } + + if txn.celse { + panic("cannot call If after Else!") + } + + txn.cif = true + + for i := range cs { + txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i])) + } + + return txn +} + +func (txn *txn) Then(ops ...Op) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.cthen { + panic("cannot call Then twice!") + } + if txn.celse { + panic("cannot call Then after Else!") + } + + txn.cthen = true + + for _, op := range ops { + txn.isWrite = txn.isWrite || op.isWrite() + txn.sus = append(txn.sus, op.toRequestOp()) + } + + return txn +} + +func (txn *txn) Else(ops ...Op) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.celse { + panic("cannot call Else twice!") + } + + txn.celse = true + + for _, op := range ops { + txn.isWrite = txn.isWrite || op.isWrite() + txn.fas = append(txn.fas, op.toRequestOp()) + } + + return txn +} + +func (txn *txn) Commit() (*TxnResponse, error) { + txn.mu.Lock() + defer txn.mu.Unlock() + + r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} + + var resp *pb.TxnResponse + var err error + resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) + if err != nil { + return nil, toErr(txn.ctx, err) + } + return (*TxnResponse)(resp), nil +} diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go new file mode 100644 index 000000000000..16a91fdff404 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/watch.go @@ -0,0 +1,814 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "fmt" + "sync" + "time" + + v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + EventTypeDelete = mvccpb.DELETE + EventTypePut = mvccpb.PUT + + closeSendErrTimeout = 250 * time.Millisecond +) + +type Event mvccpb.Event + +type WatchChan <-chan WatchResponse + +type Watcher interface { + // Watch watches on a key or prefix. The watched events will be returned + // through the returned channel. If revisions waiting to be sent over the + // watch are compacted, then the watch will be canceled by the server, the + // client will post a compacted error watch response, and the channel will close. 
+	Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
+
+	// Close closes the watcher and cancels all watch requests.
+	Close() error
+}
+
+type WatchResponse struct {
+	Header pb.ResponseHeader
+	Events []*Event
+
+	// CompactRevision is the minimum revision the watcher may receive.
+	CompactRevision int64
+
+	// Canceled is used to indicate watch failure.
+	// If the watch failed and the stream was about to close, the channel sends a final response
+	// that has Canceled set to true with a non-nil Err() before the channel is closed.
+	Canceled bool
+
+	// Created is used to indicate the creation of the watcher.
+	Created bool
+
+	closeErr error
+
+	// cancelReason is the reason the watch was canceled
+	cancelReason string
+}
+
+// IsCreate returns true if the event indicates that the key is newly created.
+func (e *Event) IsCreate() bool {
+	return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
+}
+
+// IsModify returns true if the event indicates that a new value was put on an existing key.
+func (e *Event) IsModify() bool {
+	return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
+}
+
+// Err is the error value if this WatchResponse holds an error.
+func (wr *WatchResponse) Err() error {
+	switch {
+	case wr.closeErr != nil:
+		return v3rpc.Error(wr.closeErr)
+	case wr.CompactRevision != 0:
+		return v3rpc.ErrCompacted
+	case wr.Canceled:
+		if len(wr.cancelReason) != 0 {
+			return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
+		}
+		return v3rpc.ErrFutureRev
+	}
+	return nil
+}
+
+// IsProgressNotify returns true if the WatchResponse is a progress notification.
+func (wr *WatchResponse) IsProgressNotify() bool {
+	return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
+}
+
+// watcher implements the Watcher interface
+type watcher struct {
+	remote   pb.WatchClient
+	callOpts []grpc.CallOption
+
+	// mu protects the grpc streams map
+	mu sync.RWMutex
+
+	// streams holds all the active grpc streams keyed by ctx value.
+	streams map[string]*watchGrpcStream
+}
+
+// watchGrpcStream tracks all watch resources attached to a single grpc stream.
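[Editor's note, not part of the vendored diff: the WatchResponse semantics above (progress notifications carry no events; a canceled or compacted watch surfaces one final response with a non-nil Err() before the channel closes) suggest a consumer loop shaped roughly like this hypothetical sketch. The helper name watchKey and the localhost endpoint are the editor's assumptions. The watch.go hunk resumes below with watchGrpcStream.]

package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"

	"golang.org/x/net/context"
)

// watchKey drains a WatchChan until the context ends or the server cancels
// the watch (for example, because the requested start revision was compacted).
func watchKey(ctx context.Context, cli *clientv3.Client, key string) error {
	for resp := range cli.Watch(ctx, key) {
		if resp.IsProgressNotify() {
			continue // heartbeat: no events, only a revision checkpoint
		}
		if err := resp.Err(); err != nil {
			// The final response of a canceled or compacted watch carries
			// a non-nil Err() before the channel closes.
			return err
		}
		for _, ev := range resp.Events {
			log.Printf("%s %q", ev.Type, ev.Kv.Key)
		}
	}
	return ctx.Err() // channel closed: context canceled or client closed
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	log.Println(watchKey(context.Background(), cli, "foo"))
}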
+type watchGrpcStream struct { + owner *watcher + remote pb.WatchClient + callOpts []grpc.CallOption + + // ctx controls internal remote.Watch requests + ctx context.Context + // ctxKey is the key used when looking up this stream's context + ctxKey string + cancel context.CancelFunc + + // substreams holds all active watchers on this grpc stream + substreams map[int64]*watcherStream + // resuming holds all resuming watchers on this grpc stream + resuming []*watcherStream + + // reqc sends a watch request from Watch() to the main goroutine + reqc chan *watchRequest + // respc receives data from the watch client + respc chan *pb.WatchResponse + // donec closes to broadcast shutdown + donec chan struct{} + // errc transmits errors from grpc Recv to the watch stream reconnect logic + errc chan error + // closingc gets the watcherStream of closing watchers + closingc chan *watcherStream + // wg is Done when all substream goroutines have exited + wg sync.WaitGroup + + // resumec closes to signal that all substreams should begin resuming + resumec chan struct{} + // closeErr is the error that closed the watch stream + closeErr error +} + +// watchRequest is issued by the subscriber to start a new watcher +type watchRequest struct { + ctx context.Context + key string + end string + rev int64 + // send created notification event if this field is true + createdNotify bool + // progressNotify is for progress updates + progressNotify bool + // filters is the list of events to filter out + filters []pb.WatchCreateRequest_FilterType + // get the previous key-value pair before the event happens + prevKV bool + // retc receives a chan WatchResponse once the watcher is established + retc chan chan WatchResponse +} + +// watcherStream represents a registered watcher +type watcherStream struct { + // initReq is the request that initiated this request + initReq watchRequest + + // outc publishes watch responses to subscriber + outc chan WatchResponse + // recvc buffers watch responses before publishing + recvc chan *WatchResponse + // donec closes when the watcherStream goroutine stops. + donec chan struct{} + // closing is set to true when stream should be scheduled to shutdown. 
+ closing bool + // id is the registered watch id on the grpc stream + id int64 + + // buf holds all events received from etcd but not yet consumed by the client + buf []*WatchResponse +} + +func NewWatcher(c *Client) Watcher { + return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c) +} + +func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { + w := &watcher{ + remote: wc, + streams: make(map[string]*watchGrpcStream), + } + if c != nil { + w.callOpts = c.callOpts + } + return w +} + +// never closes +var valCtxCh = make(chan struct{}) +var zeroTime = time.Unix(0, 0) + +// ctx with only the values; never Done +type valCtx struct{ context.Context } + +func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } +func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } +func (vc *valCtx) Err() error { return nil } + +func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { + ctx, cancel := context.WithCancel(&valCtx{inctx}) + wgs := &watchGrpcStream{ + owner: w, + remote: w.remote, + callOpts: w.callOpts, + ctx: ctx, + ctxKey: streamKeyFromCtx(inctx), + cancel: cancel, + substreams: make(map[int64]*watcherStream), + respc: make(chan *pb.WatchResponse), + reqc: make(chan *watchRequest), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan *watcherStream), + resumec: make(chan struct{}), + } + go wgs.run() + return wgs +} + +// Watch posts a watch request to run() and waits for a new watcher channel +func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { + ow := opWatch(key, opts...) + + var filters []pb.WatchCreateRequest_FilterType + if ow.filterPut { + filters = append(filters, pb.WatchCreateRequest_NOPUT) + } + if ow.filterDelete { + filters = append(filters, pb.WatchCreateRequest_NODELETE) + } + + wr := &watchRequest{ + ctx: ctx, + createdNotify: ow.createdNotify, + key: string(ow.key), + end: string(ow.end), + rev: ow.rev, + progressNotify: ow.progressNotify, + filters: filters, + prevKV: ow.prevKV, + retc: make(chan chan WatchResponse, 1), + } + + ok := false + ctxKey := streamKeyFromCtx(ctx) + + // find or allocate appropriate grpc watch stream + w.mu.Lock() + if w.streams == nil { + // closed + w.mu.Unlock() + ch := make(chan WatchResponse) + close(ch) + return ch + } + wgs := w.streams[ctxKey] + if wgs == nil { + wgs = w.newWatcherGrpcStream(ctx) + w.streams[ctxKey] = wgs + } + donec := wgs.donec + reqc := wgs.reqc + w.mu.Unlock() + + // couldn't create channel; return closed channel + closeCh := make(chan WatchResponse, 1) + + // submit request + select { + case reqc <- wr: + ok = true + case <-wr.ctx.Done(): + case <-donec: + if wgs.closeErr != nil { + closeCh <- WatchResponse{closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + return w.Watch(ctx, key, opts...) + } + + // receive channel + if ok { + select { + case ret := <-wr.retc: + return ret + case <-ctx.Done(): + case <-donec: + if wgs.closeErr != nil { + closeCh <- WatchResponse{closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + return w.Watch(ctx, key, opts...) 
+ } + } + + close(closeCh) + return closeCh +} + +func (w *watcher) Close() (err error) { + w.mu.Lock() + streams := w.streams + w.streams = nil + w.mu.Unlock() + for _, wgs := range streams { + if werr := wgs.close(); werr != nil { + err = werr + } + } + return err +} + +func (w *watchGrpcStream) close() (err error) { + w.cancel() + <-w.donec + select { + case err = <-w.errc: + default: + } + return toErr(w.ctx, err) +} + +func (w *watcher) closeStream(wgs *watchGrpcStream) { + w.mu.Lock() + close(wgs.donec) + wgs.cancel() + if w.streams != nil { + delete(w.streams, wgs.ctxKey) + } + w.mu.Unlock() +} + +func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { + if resp.WatchId == -1 { + // failed; no channel + close(ws.recvc) + return + } + ws.id = resp.WatchId + w.substreams[ws.id] = ws +} + +func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { + select { + case ws.outc <- *resp: + case <-ws.initReq.ctx.Done(): + case <-time.After(closeSendErrTimeout): + } + close(ws.outc) +} + +func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { + // send channel response in case stream was never established + select { + case ws.initReq.retc <- ws.outc: + default: + } + // close subscriber's channel + if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { + go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr}) + } else if ws.outc != nil { + close(ws.outc) + } + if ws.id != -1 { + delete(w.substreams, ws.id) + return + } + for i := range w.resuming { + if w.resuming[i] == ws { + w.resuming[i] = nil + return + } + } +} + +// run is the root of the goroutines for managing a watcher client +func (w *watchGrpcStream) run() { + var wc pb.Watch_WatchClient + var closeErr error + + // substreams marked to close but goroutine still running; needed for + // avoiding double-closing recvc on grpc stream teardown + closing := make(map[*watcherStream]struct{}) + + defer func() { + w.closeErr = closeErr + // shutdown substreams and resuming substreams + for _, ws := range w.substreams { + if _, ok := closing[ws]; !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + for _, ws := range w.resuming { + if _, ok := closing[ws]; ws != nil && !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + w.joinSubstreams() + for range closing { + w.closeSubstream(<-w.closingc) + } + w.wg.Wait() + w.owner.closeStream(w) + }() + + // start a stream with the etcd grpc server + if wc, closeErr = w.newWatchClient(); closeErr != nil { + return + } + + cancelSet := make(map[int64]struct{}) + + for { + select { + // Watch() requested + case wreq := <-w.reqc: + outc := make(chan WatchResponse, 1) + ws := &watcherStream{ + initReq: *wreq, + id: -1, + outc: outc, + // unbuffered so resumes won't cause repeat events + recvc: make(chan *WatchResponse), + } + + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + + // queue up for watcher creation/resume + w.resuming = append(w.resuming, ws) + if len(w.resuming) == 1 { + // head of resume queue, can register a new watcher + wc.Send(ws.initReq.toPB()) + } + // New events from the watch client + case pbresp := <-w.respc: + switch { + case pbresp.Created: + // response to head of queue creation + if ws := w.resuming[0]; ws != nil { + w.addSubstream(pbresp, ws) + w.dispatchEvent(pbresp) + w.resuming[0] = nil + } + if ws := w.nextResume(); ws != nil { + wc.Send(ws.initReq.toPB()) + } + case pbresp.Canceled: + delete(cancelSet, pbresp.WatchId) + if ws, ok 
:= w.substreams[pbresp.WatchId]; ok { + // signal to stream goroutine to update closingc + close(ws.recvc) + closing[ws] = struct{}{} + } + default: + // dispatch to appropriate watch stream + if ok := w.dispatchEvent(pbresp); ok { + break + } + // watch response on unexpected watch id; cancel id + if _, ok := cancelSet[pbresp.WatchId]; ok { + break + } + cancelSet[pbresp.WatchId] = struct{}{} + cr := &pb.WatchRequest_CancelRequest{ + CancelRequest: &pb.WatchCancelRequest{ + WatchId: pbresp.WatchId, + }, + } + req := &pb.WatchRequest{RequestUnion: cr} + wc.Send(req) + } + // watch client failed on Recv; spawn another if possible + case err := <-w.errc: + if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { + closeErr = err + return + } + if wc, closeErr = w.newWatchClient(); closeErr != nil { + return + } + if ws := w.nextResume(); ws != nil { + wc.Send(ws.initReq.toPB()) + } + cancelSet = make(map[int64]struct{}) + case <-w.ctx.Done(): + return + case ws := <-w.closingc: + w.closeSubstream(ws) + delete(closing, ws) + if len(w.substreams)+len(w.resuming) == 0 { + // no more watchers on this stream, shutdown + return + } + } + } +} + +// nextResume chooses the next resuming to register with the grpc stream. Abandoned +// streams are marked as nil in the queue since the head must wait for its inflight registration. +func (w *watchGrpcStream) nextResume() *watcherStream { + for len(w.resuming) != 0 { + if w.resuming[0] != nil { + return w.resuming[0] + } + w.resuming = w.resuming[1:len(w.resuming)] + } + return nil +} + +// dispatchEvent sends a WatchResponse to the appropriate watcher stream +func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { + events := make([]*Event, len(pbresp.Events)) + for i, ev := range pbresp.Events { + events[i] = (*Event)(ev) + } + wr := &WatchResponse{ + Header: *pbresp.Header, + Events: events, + CompactRevision: pbresp.CompactRevision, + Created: pbresp.Created, + Canceled: pbresp.Canceled, + cancelReason: pbresp.CancelReason, + } + ws, ok := w.substreams[pbresp.WatchId] + if !ok { + return false + } + select { + case ws.recvc <- wr: + case <-ws.donec: + return false + } + return true +} + +// serveWatchClient forwards messages from the grpc stream to run() +func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { + for { + resp, err := wc.Recv() + if err != nil { + select { + case w.errc <- err: + case <-w.donec: + } + return + } + select { + case w.respc <- resp: + case <-w.donec: + return + } + } +} + +// serveSubstream forwards watch responses from run() to the subscriber +func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { + if ws.closing { + panic("created substream goroutine but substream is closing") + } + + // nextRev is the minimum expected next revision + nextRev := ws.initReq.rev + resuming := false + defer func() { + if !resuming { + ws.closing = true + } + close(ws.donec) + if !resuming { + w.closingc <- ws + } + w.wg.Done() + }() + + emptyWr := &WatchResponse{} + for { + curWr := emptyWr + outc := ws.outc + + if len(ws.buf) > 0 { + curWr = ws.buf[0] + } else { + outc = nil + } + select { + case outc <- *curWr: + if ws.buf[0].Err() != nil { + return + } + ws.buf[0] = nil + ws.buf = ws.buf[1:] + case wr, ok := <-ws.recvc: + if !ok { + // shutdown from closeSubstream + return + } + + if wr.Created { + if ws.initReq.retc != nil { + ws.initReq.retc <- ws.outc + // to prevent next write from taking the slot in buffered channel + // and posting duplicate create events + 
ws.initReq.retc = nil + + // send first creation event only if requested + if ws.initReq.createdNotify { + ws.outc <- *wr + } + // once the watch channel is returned, a current revision + // watch must resume at the store revision. This is necessary + // for the following case to work as expected: + // wch := m1.Watch("a") + // m2.Put("a", "b") + // <-wch + // If the revision is only bound on the first observed event, + // if wch is disconnected before the Put is issued, then reconnects + // after it is committed, it'll miss the Put. + if ws.initReq.rev == 0 { + nextRev = wr.Header.Revision + } + } + } else { + // current progress of watch; <= store revision + nextRev = wr.Header.Revision + } + + if len(wr.Events) > 0 { + nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 + } + ws.initReq.rev = nextRev + + // created event is already sent above, + // watcher should not post duplicate events + if wr.Created { + continue + } + + // TODO pause channel if buffer gets too large + ws.buf = append(ws.buf, wr) + case <-w.ctx.Done(): + return + case <-ws.initReq.ctx.Done(): + return + case <-resumec: + resuming = true + return + } + } + // lazily send cancel message if events on missing id +} + +func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { + // mark all substreams as resuming + close(w.resumec) + w.resumec = make(chan struct{}) + w.joinSubstreams() + for _, ws := range w.substreams { + ws.id = -1 + w.resuming = append(w.resuming, ws) + } + // strip out nils, if any + var resuming []*watcherStream + for _, ws := range w.resuming { + if ws != nil { + resuming = append(resuming, ws) + } + } + w.resuming = resuming + w.substreams = make(map[int64]*watcherStream) + + // connect to grpc stream while accepting watcher cancelation + stopc := make(chan struct{}) + donec := w.waitCancelSubstreams(stopc) + wc, err := w.openWatchClient() + close(stopc) + <-donec + + // serve all non-closing streams, even if there's a client error + // so that the teardown path can shutdown the streams as expected. + for _, ws := range w.resuming { + if ws.closing { + continue + } + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + } + + if err != nil { + return nil, v3rpc.Error(err) + } + + // receive data from new grpc stream + go w.serveWatchClient(wc) + return wc, nil +} + +func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { + var wg sync.WaitGroup + wg.Add(len(w.resuming)) + donec := make(chan struct{}) + for i := range w.resuming { + go func(ws *watcherStream) { + defer wg.Done() + if ws.closing { + if ws.initReq.ctx.Err() != nil && ws.outc != nil { + close(ws.outc) + ws.outc = nil + } + return + } + select { + case <-ws.initReq.ctx.Done(): + // closed ws will be removed from resuming + ws.closing = true + close(ws.outc) + ws.outc = nil + w.wg.Add(1) + go func() { + defer w.wg.Done() + w.closingc <- ws + }() + case <-stopc: + } + }(w.resuming[i]) + } + go func() { + defer close(donec) + wg.Wait() + }() + return donec +} + +// joinSubstreams waits for all substream goroutines to complete. +func (w *watchGrpcStream) joinSubstreams() { + for _, ws := range w.substreams { + <-ws.donec + } + for _, ws := range w.resuming { + if ws != nil { + <-ws.donec + } + } +} + +// openWatchClient retries opening a watch client until success or halt. 
+// manually retry in case "ws==nil && err==nil" +// TODO: remove FailFast=false +func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { + for { + select { + case <-w.ctx.Done(): + if err == nil { + return nil, w.ctx.Err() + } + return nil, err + default: + } + if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil { + break + } + if isHaltErr(w.ctx, err) { + return nil, v3rpc.Error(err) + } + } + return ws, nil +} + +// toPB converts an internal watch request structure to its protobuf WatchRequest structure. +func (wr *watchRequest) toPB() *pb.WatchRequest { + req := &pb.WatchCreateRequest{ + StartRevision: wr.rev, + Key: []byte(wr.key), + RangeEnd: []byte(wr.end), + ProgressNotify: wr.progressNotify, + Filters: wr.filters, + PrevKv: wr.prevKV, + } + cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} + return &pb.WatchRequest{RequestUnion: cr} +} + +func streamKeyFromCtx(ctx context.Context) string { + if md, ok := metadata.FromOutgoingContext(ctx); ok { + return fmt.Sprintf("%+v", md) + } + return "" +} diff --git a/vendor/github.com/coreos/etcd/cmd/etcd b/vendor/github.com/coreos/etcd/cmd/etcd new file mode 120000 index 000000000000..b870225aa053 --- /dev/null +++ b/vendor/github.com/coreos/etcd/cmd/etcd @@ -0,0 +1 @@ +../ \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/cmd/etcdctl b/vendor/github.com/coreos/etcd/cmd/etcdctl new file mode 120000 index 000000000000..05bb269d6096 --- /dev/null +++ b/vendor/github.com/coreos/etcd/cmd/etcdctl @@ -0,0 +1 @@ +../etcdctl \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/cmd/tools b/vendor/github.com/coreos/etcd/cmd/tools new file mode 120000 index 000000000000..4887d6e0c92b --- /dev/null +++ b/vendor/github.com/coreos/etcd/cmd/tools @@ -0,0 +1 @@ +../tools \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/compactor/BUILD b/vendor/github.com/coreos/etcd/compactor/BUILD new file mode 100644 index 000000000000..96e4d07dc6e0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/compactor/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "compactor.go", + "doc.go", + ], + importpath = "github.com/coreos/etcd/compactor", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/jonboulle/clockwork:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/compactor/compactor.go b/vendor/github.com/coreos/etcd/compactor/compactor.go new file mode 100644 index 000000000000..5cf7b65094a3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/compactor/compactor.go @@ -0,0 +1,137 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compactor + +import ( + "sync" + "time" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/pkg/capnslog" + "github.com/jonboulle/clockwork" + "golang.org/x/net/context" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "compactor") +) + +const ( + checkCompactionInterval = 5 * time.Minute + executeCompactionInterval = time.Hour +) + +type Compactable interface { + Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) +} + +type RevGetter interface { + Rev() int64 +} + +// Periodic compacts the log by purging revisions older than +// the configured retention time. Compaction happens hourly. +type Periodic struct { + clock clockwork.Clock + periodInHour int + + rg RevGetter + c Compactable + + revs []int64 + ctx context.Context + cancel context.CancelFunc + + mu sync.Mutex + paused bool +} + +func NewPeriodic(h int, rg RevGetter, c Compactable) *Periodic { + return &Periodic{ + clock: clockwork.NewRealClock(), + periodInHour: h, + rg: rg, + c: c, + } +} + +func (t *Periodic) Run() { + t.ctx, t.cancel = context.WithCancel(context.Background()) + t.revs = make([]int64, 0) + clock := t.clock + + go func() { + last := clock.Now() + for { + t.revs = append(t.revs, t.rg.Rev()) + select { + case <-t.ctx.Done(): + return + case <-clock.After(checkCompactionInterval): + t.mu.Lock() + p := t.paused + t.mu.Unlock() + if p { + continue + } + } + + if clock.Now().Sub(last) < executeCompactionInterval { + continue + } + + rev, remaining := t.getRev(t.periodInHour) + if rev < 0 { + continue + } + + plog.Noticef("Starting auto-compaction at revision %d", rev) + _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) + if err == nil || err == mvcc.ErrCompacted { + t.revs = remaining + last = clock.Now() + plog.Noticef("Finished auto-compaction at revision %d", rev) + } else { + plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err) + plog.Noticef("Retry after %v", checkCompactionInterval) + } + } + }() +} + +func (t *Periodic) Stop() { + t.cancel() +} + +func (t *Periodic) Pause() { + t.mu.Lock() + defer t.mu.Unlock() + t.paused = true +} + +func (t *Periodic) Resume() { + t.mu.Lock() + defer t.mu.Unlock() + t.paused = false +} + +func (t *Periodic) getRev(h int) (int64, []int64) { + i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval) + if i < 0 { + return -1, t.revs + } + return t.revs[i], t.revs[i+1:] +} diff --git a/vendor/github.com/coreos/etcd/compactor/doc.go b/vendor/github.com/coreos/etcd/compactor/doc.go new file mode 100644 index 000000000000..cb158340e493 --- /dev/null +++ b/vendor/github.com/coreos/etcd/compactor/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compactor implements automated policies for compacting etcd's mvcc storage. +package compactor diff --git a/vendor/github.com/coreos/etcd/discovery/BUILD b/vendor/github.com/coreos/etcd/discovery/BUILD new file mode 100644 index 000000000000..496402a67613 --- /dev/null +++ b/vendor/github.com/coreos/etcd/discovery/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["discovery.go"], + importpath = "github.com/coreos/etcd/discovery", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/client:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/jonboulle/clockwork:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/discovery/discovery.go b/vendor/github.com/coreos/etcd/discovery/discovery.go new file mode 100644 index 000000000000..edc842ffb971 --- /dev/null +++ b/vendor/github.com/coreos/etcd/discovery/discovery.go @@ -0,0 +1,362 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package discovery provides an implementation of the cluster discovery that +// is used by etcd. 
+package discovery + +import ( + "errors" + "fmt" + "math" + "net/http" + "net/url" + "path" + "sort" + "strconv" + "strings" + "time" + + "github.com/coreos/etcd/client" + "github.com/coreos/etcd/pkg/transport" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/pkg/capnslog" + "github.com/jonboulle/clockwork" + "golang.org/x/net/context" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "discovery") + + ErrInvalidURL = errors.New("discovery: invalid URL") + ErrBadSizeKey = errors.New("discovery: size key is bad") + ErrSizeNotFound = errors.New("discovery: size key not found") + ErrTokenNotFound = errors.New("discovery: token not found") + ErrDuplicateID = errors.New("discovery: found duplicate id") + ErrDuplicateName = errors.New("discovery: found duplicate name") + ErrFullCluster = errors.New("discovery: cluster is full") + ErrTooManyRetries = errors.New("discovery: too many retries") + ErrBadDiscoveryEndpoint = errors.New("discovery: bad discovery endpoint") +) + +var ( + // Number of retries discovery will attempt before giving up and erroring out. + nRetries = uint(math.MaxUint32) + maxExpoentialRetries = uint(8) +) + +// JoinCluster will connect to the discovery service at the given url, and +// register the server represented by the given id and config to the cluster +func JoinCluster(durl, dproxyurl string, id types.ID, config string) (string, error) { + d, err := newDiscovery(durl, dproxyurl, id) + if err != nil { + return "", err + } + return d.joinCluster(config) +} + +// GetCluster will connect to the discovery service at the given url and +// retrieve a string describing the cluster +func GetCluster(durl, dproxyurl string) (string, error) { + d, err := newDiscovery(durl, dproxyurl, 0) + if err != nil { + return "", err + } + return d.getCluster() +} + +type discovery struct { + cluster string + id types.ID + c client.KeysAPI + retries uint + url *url.URL + + clock clockwork.Clock +} + +// newProxyFunc builds a proxy function from the given string, which should +// represent a URL that can be used as a proxy. It performs basic +// sanitization of the URL and returns any error encountered. +func newProxyFunc(proxy string) (func(*http.Request) (*url.URL, error), error) { + if proxy == "" { + return nil, nil + } + // Do a small amount of URL sanitization to help the user + // Derived from net/http.ProxyFromEnvironment + proxyURL, err := url.Parse(proxy) + if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") { + // proxy was bogus. Try prepending "http://" to it and + // see if that parses correctly. 
If not, we ignore the + // error and complain about the original one + var err2 error + proxyURL, err2 = url.Parse("http://" + proxy) + if err2 == nil { + err = nil + } + } + if err != nil { + return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) + } + + plog.Infof("using proxy %q", proxyURL.String()) + return http.ProxyURL(proxyURL), nil +} + +func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) { + u, err := url.Parse(durl) + if err != nil { + return nil, err + } + token := u.Path + u.Path = "" + pf, err := newProxyFunc(dproxyurl) + if err != nil { + return nil, err + } + + // TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early + tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second) + if err != nil { + return nil, err + } + tr.Proxy = pf + cfg := client.Config{ + Transport: tr, + Endpoints: []string{u.String()}, + } + c, err := client.New(cfg) + if err != nil { + return nil, err + } + dc := client.NewKeysAPIWithPrefix(c, "") + return &discovery{ + cluster: token, + c: dc, + id: id, + url: u, + clock: clockwork.NewRealClock(), + }, nil +} + +func (d *discovery) joinCluster(config string) (string, error) { + // fast path: if the cluster is full, return the error + // do not need to register to the cluster in this case. + if _, _, _, err := d.checkCluster(); err != nil { + return "", err + } + + if err := d.createSelf(config); err != nil { + // Fails, even on a timeout, if createSelf times out. + // TODO(barakmich): Retrying the same node might want to succeed here + // (ie, createSelf should be idempotent for discovery). + return "", err + } + + nodes, size, index, err := d.checkCluster() + if err != nil { + return "", err + } + + all, err := d.waitNodes(nodes, size, index) + if err != nil { + return "", err + } + + return nodesToCluster(all, size) +} + +func (d *discovery) getCluster() (string, error) { + nodes, size, index, err := d.checkCluster() + if err != nil { + if err == ErrFullCluster { + return nodesToCluster(nodes, size) + } + return "", err + } + + all, err := d.waitNodes(nodes, size, index) + if err != nil { + return "", err + } + return nodesToCluster(all, size) +} + +func (d *discovery) createSelf(contents string) error { + ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) + resp, err := d.c.Create(ctx, d.selfKey(), contents) + cancel() + if err != nil { + if eerr, ok := err.(client.Error); ok && eerr.Code == client.ErrorCodeNodeExist { + return ErrDuplicateID + } + return err + } + + // ensure self appears on the server we connected to + w := d.c.Watcher(d.selfKey(), &client.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1}) + _, err = w.Next(context.Background()) + return err +} + +func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) { + configKey := path.Join("/", d.cluster, "_config") + ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) + // find cluster size + resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil) + cancel() + if err != nil { + if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound { + return nil, 0, 0, ErrSizeNotFound + } + if err == client.ErrInvalidJSON { + return nil, 0, 0, ErrBadDiscoveryEndpoint + } + if ce, ok := err.(*client.ClusterError); ok { + plog.Error(ce.Detail()) + return d.checkClusterRetry() + } + return nil, 0, 0, err + } + size, err := strconv.Atoi(resp.Node.Value) + if err != nil { + return nil, 0, 0, ErrBadSizeKey + 
}
+
+	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+	resp, err = d.c.Get(ctx, d.cluster, nil)
+	cancel()
+	if err != nil {
+		if ce, ok := err.(*client.ClusterError); ok {
+			plog.Error(ce.Detail())
+			return d.checkClusterRetry()
+		}
+		return nil, 0, 0, err
+	}
+	var nodes []*client.Node
+	// append non-config keys to nodes
+	for _, n := range resp.Node.Nodes {
+		if !(path.Base(n.Key) == path.Base(configKey)) {
+			nodes = append(nodes, n)
+		}
+	}
+
+	snodes := sortableNodes{nodes}
+	sort.Sort(snodes)
+
+	// find self position
+	for i := range nodes {
+		if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
+			break
+		}
+		if i >= size-1 {
+			return nodes[:size], size, resp.Index, ErrFullCluster
+		}
+	}
+	return nodes, size, resp.Index, nil
+}
+
+func (d *discovery) logAndBackoffForRetry(step string) {
+	d.retries++
+	// logAndBackoffForRetry stops exponential backoff when the retries are more than maxExpoentialRetries and is set to a constant backoff afterward.
+	retries := d.retries
+	if retries > maxExpoentialRetries {
+		retries = maxExpoentialRetries
+	}
+	retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
+	plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTimeInSecond)
+	d.clock.Sleep(retryTimeInSecond)
+}
+
+func (d *discovery) checkClusterRetry() ([]*client.Node, int, uint64, error) {
+	if d.retries < nRetries {
+		d.logAndBackoffForRetry("cluster status check")
+		return d.checkCluster()
+	}
+	return nil, 0, 0, ErrTooManyRetries
+}
+
+func (d *discovery) waitNodesRetry() ([]*client.Node, error) {
+	if d.retries < nRetries {
+		d.logAndBackoffForRetry("waiting for other nodes")
+		nodes, n, index, err := d.checkCluster()
+		if err != nil {
+			return nil, err
+		}
+		return d.waitNodes(nodes, n, index)
+	}
+	return nil, ErrTooManyRetries
+}
+
+func (d *discovery) waitNodes(nodes []*client.Node, size int, index uint64) ([]*client.Node, error) {
+	if len(nodes) > size {
+		nodes = nodes[:size]
+	}
+	// watch from the next index
+	w := d.c.Watcher(d.cluster, &client.WatcherOptions{AfterIndex: index, Recursive: true})
+	all := make([]*client.Node, len(nodes))
+	copy(all, nodes)
+	for _, n := range all {
+		if path.Base(n.Key) == path.Base(d.selfKey()) {
+			plog.Noticef("found self %s in the cluster", path.Base(d.selfKey()))
+		} else {
+			plog.Noticef("found peer %s in the cluster", path.Base(n.Key))
+		}
+	}
+
+	// wait for others
+	for len(all) < size {
+		plog.Noticef("found %d peer(s), waiting for %d more", len(all), size-len(all))
+		resp, err := w.Next(context.Background())
+		if err != nil {
+			if ce, ok := err.(*client.ClusterError); ok {
+				plog.Error(ce.Detail())
+				return d.waitNodesRetry()
+			}
+			return nil, err
+		}
+		plog.Noticef("found peer %s in the cluster", path.Base(resp.Node.Key))
+		all = append(all, resp.Node)
+	}
+	plog.Noticef("found %d needed peer(s)", len(all))
+	return all, nil
+}
+
+func (d *discovery) selfKey() string {
+	return path.Join("/", d.cluster, d.id.String())
+}
+
+func nodesToCluster(ns []*client.Node, size int) (string, error) {
+	s := make([]string, len(ns))
+	for i, n := range ns {
+		s[i] = n.Value
+	}
+	us := strings.Join(s, ",")
+	m, err := types.NewURLsMap(us)
+	if err != nil {
+		return us, ErrInvalidURL
+	}
+	if m.Len() != size {
+		return us, ErrDuplicateName
+	}
+	return us, nil
+}
+
+type sortableNodes struct{ Nodes []*client.Node }
+
+func (ns sortableNodes) Len() int { return len(ns.Nodes) }
+func (ns sortableNodes) Less(i, j int) bool {
+	return ns.Nodes[i].CreatedIndex < ns.Nodes[j].CreatedIndex
+}
+func (ns sortableNodes) Swap(i, j int) { ns.Nodes[i], ns.Nodes[j] = ns.Nodes[j], ns.Nodes[i] }
diff --git a/vendor/github.com/coreos/etcd/error/BUILD b/vendor/github.com/coreos/etcd/error/BUILD
new file mode 100644
index 000000000000..fe36d8208de2
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/error/BUILD
@@ -0,0 +1,22 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["error.go"],
+    importpath = "github.com/coreos/etcd/error",
+    visibility = ["//visibility:public"],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+
visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/error/error.go b/vendor/github.com/coreos/etcd/error/error.go new file mode 100644 index 000000000000..b541a628b87f --- /dev/null +++ b/vendor/github.com/coreos/etcd/error/error.go @@ -0,0 +1,163 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package error describes errors in etcd project. When any change happens, +// Documentation/v2/errorcode.md needs to be updated correspondingly. +package error + +import ( + "encoding/json" + "fmt" + "net/http" +) + +var errors = map[int]string{ + // command related errors + EcodeKeyNotFound: "Key not found", + EcodeTestFailed: "Compare failed", //test and set + EcodeNotFile: "Not a file", + ecodeNoMorePeer: "Reached the max number of peers in the cluster", + EcodeNotDir: "Not a directory", + EcodeNodeExist: "Key already exists", // create + ecodeKeyIsPreserved: "The prefix of given key is a keyword in etcd", + EcodeRootROnly: "Root is read only", + EcodeDirNotEmpty: "Directory not empty", + ecodeExistingPeerAddr: "Peer address has existed", + EcodeUnauthorized: "The request requires user authentication", + + // Post form related errors + ecodeValueRequired: "Value is Required in POST form", + EcodePrevValueRequired: "PrevValue is Required in POST form", + EcodeTTLNaN: "The given TTL in POST form is not a number", + EcodeIndexNaN: "The given index in POST form is not a number", + ecodeValueOrTTLRequired: "Value or TTL is required in POST form", + ecodeTimeoutNaN: "The given timeout in POST form is not a number", + ecodeNameRequired: "Name is required in POST form", + ecodeIndexOrValueRequired: "Index or value is required", + ecodeIndexValueMutex: "Index and value cannot both be specified", + EcodeInvalidField: "Invalid field", + EcodeInvalidForm: "Invalid POST form", + EcodeRefreshValue: "Value provided on refresh", + EcodeRefreshTTLRequired: "A TTL must be provided on refresh", + + // raft related errors + EcodeRaftInternal: "Raft Internal Error", + EcodeLeaderElect: "During Leader Election", + + // etcd related errors + EcodeWatcherCleared: "watcher is cleared due to etcd recovery", + EcodeEventIndexCleared: "The event in requested index is outdated and cleared", + ecodeStandbyInternal: "Standby Internal Error", + ecodeInvalidActiveSize: "Invalid active size", + ecodeInvalidRemoveDelay: "Standby remove delay", + + // client related errors + ecodeClientInternal: "Client Internal Error", +} + +var errorStatus = map[int]int{ + EcodeKeyNotFound: http.StatusNotFound, + EcodeNotFile: http.StatusForbidden, + EcodeDirNotEmpty: http.StatusForbidden, + EcodeUnauthorized: http.StatusUnauthorized, + EcodeTestFailed: http.StatusPreconditionFailed, + EcodeNodeExist: http.StatusPreconditionFailed, + EcodeRaftInternal: http.StatusInternalServerError, + EcodeLeaderElect: http.StatusInternalServerError, +} + +const ( + EcodeKeyNotFound = 100 + EcodeTestFailed = 101 + EcodeNotFile = 102 + ecodeNoMorePeer = 103 
+ EcodeNotDir = 104 + EcodeNodeExist = 105 + ecodeKeyIsPreserved = 106 + EcodeRootROnly = 107 + EcodeDirNotEmpty = 108 + ecodeExistingPeerAddr = 109 + EcodeUnauthorized = 110 + + ecodeValueRequired = 200 + EcodePrevValueRequired = 201 + EcodeTTLNaN = 202 + EcodeIndexNaN = 203 + ecodeValueOrTTLRequired = 204 + ecodeTimeoutNaN = 205 + ecodeNameRequired = 206 + ecodeIndexOrValueRequired = 207 + ecodeIndexValueMutex = 208 + EcodeInvalidField = 209 + EcodeInvalidForm = 210 + EcodeRefreshValue = 211 + EcodeRefreshTTLRequired = 212 + + EcodeRaftInternal = 300 + EcodeLeaderElect = 301 + + EcodeWatcherCleared = 400 + EcodeEventIndexCleared = 401 + ecodeStandbyInternal = 402 + ecodeInvalidActiveSize = 403 + ecodeInvalidRemoveDelay = 404 + + ecodeClientInternal = 500 +) + +type Error struct { + ErrorCode int `json:"errorCode"` + Message string `json:"message"` + Cause string `json:"cause,omitempty"` + Index uint64 `json:"index"` +} + +func NewRequestError(errorCode int, cause string) *Error { + return NewError(errorCode, cause, 0) +} + +func NewError(errorCode int, cause string, index uint64) *Error { + return &Error{ + ErrorCode: errorCode, + Message: errors[errorCode], + Cause: cause, + Index: index, + } +} + +// Error is for the error interface +func (e Error) Error() string { + return e.Message + " (" + e.Cause + ")" +} + +func (e Error) toJsonString() string { + b, _ := json.Marshal(e) + return string(b) +} + +func (e Error) StatusCode() int { + status, ok := errorStatus[e.ErrorCode] + if !ok { + status = http.StatusBadRequest + } + return status +} + +func (e Error) WriteTo(w http.ResponseWriter) error { + w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(e.StatusCode()) + _, err := w.Write([]byte(e.toJsonString() + "\n")) + return err +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/BUILD b/vendor/github.com/coreos/etcd/etcdserver/BUILD new file mode 100644 index 000000000000..6e8232c88aad --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/BUILD @@ -0,0 +1,86 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "apply.go", + "apply_auth.go", + "apply_v2.go", + "backend.go", + "cluster_util.go", + "config.go", + "consistent_index.go", + "doc.go", + "errors.go", + "metrics.go", + "quota.go", + "raft.go", + "server.go", + "snapshot_merge.go", + "storage.go", + "util.go", + "v2_server.go", + "v3_server.go", + ], + importpath = "github.com/coreos/etcd/etcdserver", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/alarm:go_default_library", + "//vendor/github.com/coreos/etcd/auth:go_default_library", + "//vendor/github.com/coreos/etcd/compactor:go_default_library", + "//vendor/github.com/coreos/etcd/discovery:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/stats:go_default_library", + "//vendor/github.com/coreos/etcd/lease:go_default_library", + "//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library", + 
"//vendor/github.com/coreos/etcd/mvcc:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/contention:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/idutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/runtime:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/schedule:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/wait:go_default_library", + "//vendor/github.com/coreos/etcd/raft:go_default_library", + "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", + "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", + "//vendor/github.com/coreos/etcd/snap:go_default_library", + "//vendor/github.com/coreos/etcd/store:go_default_library", + "//vendor/github.com/coreos/etcd/version:go_default_library", + "//vendor/github.com/coreos/etcd/wal:go_default_library", + "//vendor/github.com/coreos/etcd/wal/walpb:go_default_library", + "//vendor/github.com/coreos/go-semver/semver:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api:all-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:all-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/membership:all-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/stats:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/BUILD new file mode 100644 index 000000000000..6c2b8f55744f --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/BUILD @@ -0,0 +1,37 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "capability.go", + "cluster.go", + "doc.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/api", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/etcd/version:go_default_library", + "//vendor/github.com/coreos/go-semver/semver:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = 
["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:all-srcs", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go new file mode 100644 index 000000000000..5e2de58e9a10 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go @@ -0,0 +1,86 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "sync" + + "github.com/coreos/etcd/version" + "github.com/coreos/go-semver/semver" + "github.com/coreos/pkg/capnslog" +) + +type Capability string + +const ( + AuthCapability Capability = "auth" + V3rpcCapability Capability = "v3rpc" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api") + + // capabilityMaps is a static map of version to capability map. + capabilityMaps = map[string]map[Capability]bool{ + "3.0.0": {AuthCapability: true, V3rpcCapability: true}, + "3.1.0": {AuthCapability: true, V3rpcCapability: true}, + "3.2.0": {AuthCapability: true, V3rpcCapability: true}, + } + + enableMapMu sync.RWMutex + // enabledMap points to a map in capabilityMaps + enabledMap map[Capability]bool + + curVersion *semver.Version +) + +func init() { + enabledMap = map[Capability]bool{ + AuthCapability: true, + V3rpcCapability: true, + } +} + +// UpdateCapability updates the enabledMap when the cluster version increases. +func UpdateCapability(v *semver.Version) { + if v == nil { + // if recovered but version was never set by cluster + return + } + enableMapMu.Lock() + if curVersion != nil && !curVersion.LessThan(*v) { + enableMapMu.Unlock() + return + } + curVersion = v + enabledMap = capabilityMaps[curVersion.String()] + enableMapMu.Unlock() + plog.Infof("enabled capabilities for version %s", version.Cluster(v.String())) +} + +func IsCapabilityEnabled(c Capability) bool { + enableMapMu.RLock() + defer enableMapMu.RUnlock() + if enabledMap == nil { + return false + } + return enabledMap[c] +} + +func EnableCapability(c Capability) { + enableMapMu.Lock() + defer enableMapMu.Unlock() + enabledMap[c] = true +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go new file mode 100644 index 000000000000..87face4a139c --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go @@ -0,0 +1,41 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/types" + + "github.com/coreos/go-semver/semver" +) + +// Cluster is an interface representing a collection of members in one etcd cluster. +type Cluster interface { + // ID returns the cluster ID + ID() types.ID + // ClientURLs returns an aggregate set of all URLs on which this + // cluster is listening for client requests + ClientURLs() []string + // Members returns a slice of members sorted by their ID + Members() []*membership.Member + // Member retrieves a particular member based on ID, or nil if the + // member does not exist in the cluster + Member(id types.ID) *membership.Member + // IsIDRemoved checks whether the given ID has been removed from this + // cluster at some point in the past + IsIDRemoved(id types.ID) bool + // Version is the cluster-wide minimum major.minor version. + Version() *semver.Version +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/doc.go new file mode 100644 index 000000000000..f44881be663e --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package api manages the capabilities and features that are exposed to clients by the etcd cluster. 
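+//
+// As a quick orientation (a sketch, not upstream documentation), a request
+// handler can gate a version-dependent feature on the cluster-wide
+// capability map maintained by UpdateCapability in capability.go above:
+//
+//	if api.IsCapabilityEnabled(api.V3rpcCapability) {
+//		// serve the v3 request
+//	} else {
+//		// reply "not capable" until the cluster version catches up
+//	}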
+package api diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/BUILD new file mode 100644 index 000000000000..8a2313901f86 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "errors.go", + "member.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/api/v2http/httptypes", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go new file mode 100644 index 000000000000..0657604ca976 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httptypes + +import ( + "encoding/json" + "net/http" + + "github.com/coreos/pkg/capnslog" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http/httptypes") +) + +type HTTPError struct { + Message string `json:"message"` + // Code is the HTTP status code + Code int `json:"-"` +} + +func (e HTTPError) Error() string { + return e.Message +} + +func (e HTTPError) WriteTo(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(e.Code) + b, err := json.Marshal(e) + if err != nil { + plog.Panicf("marshal HTTPError should never fail (%v)", err) + } + if _, err := w.Write(b); err != nil { + return err + } + return nil +} + +func NewHTTPError(code int, m string) *HTTPError { + return &HTTPError{ + Message: m, + Code: code, + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go new file mode 100644 index 000000000000..738d74432f84 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go @@ -0,0 +1,69 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package httptypes defines how etcd's HTTP API entities are serialized to and +// deserialized from JSON. +package httptypes + +import ( + "encoding/json" + + "github.com/coreos/etcd/pkg/types" +) + +type Member struct { + ID string `json:"id"` + Name string `json:"name"` + PeerURLs []string `json:"peerURLs"` + ClientURLs []string `json:"clientURLs"` +} + +type MemberCreateRequest struct { + PeerURLs types.URLs +} + +type MemberUpdateRequest struct { + MemberCreateRequest +} + +func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error { + s := struct { + PeerURLs []string `json:"peerURLs"` + }{} + + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + + urls, err := types.NewURLs(s.PeerURLs) + if err != nil { + return err + } + + m.PeerURLs = urls + return nil +} + +type MemberCollection []Member + +func (c *MemberCollection) MarshalJSON() ([]byte, error) { + d := struct { + Members []Member `json:"members"` + }{ + Members: []Member(*c), + } + + return json.Marshal(d) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD new file mode 100644 index 000000000000..e1ada36c3030 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "error.go", + "md.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", + visibility = ["//visibility:public"], + deps = [ + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go new file mode 100644 index 000000000000..f72c6a644f33 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction. 
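+//
+// The ErrGRPC* values in error.go are the gRPC errors the server returns;
+// the matching Err* values are their typed client-side counterparts,
+// produced by the Error conversion function.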
+package rpctypes diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go new file mode 100644 index 000000000000..bd17179e9977 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -0,0 +1,198 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +import ( + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + // server-side error + ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") + ErrGRPCKeyNotFound = grpc.Errorf(codes.InvalidArgument, "etcdserver: key not found") + ErrGRPCValueProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: value is provided") + ErrGRPCLeaseProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: lease is provided") + ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") + ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") + ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") + ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision") + ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded") + + ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found") + ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists") + + ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist") + ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists") + ErrGRPCMemberNotEnoughStarted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members") + ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid") + ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found") + + ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large") + ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests") + + ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist") + ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role") + ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists") + ErrGRPCUserEmpty = grpc.Errorf(codes.InvalidArgument, "etcdserver: user name is empty") + ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found") + ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role 
name already exists") + ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found") + ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password") + ErrGRPCPermissionDenied = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied") + ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user") + ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role") + ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled") + ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token") + ErrGRPCInvalidAuthMgmt = grpc.Errorf(codes.InvalidArgument, "etcdserver: invalid auth management") + + ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader") + ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable") + ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped") + ErrGRPCTimeout = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out") + ErrGRPCTimeoutDueToLeaderFail = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure") + ErrGRPCTimeoutDueToConnectionLost = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost") + ErrGRPCUnhealthy = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster") + + errStringToError = map[string]error{ + grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + grpc.ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, + grpc.ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, + grpc.ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, + + grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, + grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, + grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, + grpc.ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, + grpc.ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, + + grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, + grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, + + grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist, + grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist, + grpc.ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, + grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, + grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound, + + grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, + grpc.ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, + + grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist, + grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist, + grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist, + grpc.ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty, + grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, + grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, + grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound, + grpc.ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, + grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, + grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, + grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, + grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, + grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, + 
grpc.ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, + + grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, + grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, + grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, + grpc.ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, + grpc.ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, + grpc.ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, + grpc.ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, + } + + // client-side error + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrKeyNotFound = Error(ErrGRPCKeyNotFound) + ErrValueProvided = Error(ErrGRPCValueProvided) + ErrLeaseProvided = Error(ErrGRPCLeaseProvided) + ErrTooManyOps = Error(ErrGRPCTooManyOps) + ErrDuplicateKey = Error(ErrGRPCDuplicateKey) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + ErrNoSpace = Error(ErrGRPCNoSpace) + + ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) + ErrLeaseExist = Error(ErrGRPCLeaseExist) + + ErrMemberExist = Error(ErrGRPCMemberExist) + ErrPeerURLExist = Error(ErrGRPCPeerURLExist) + ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) + ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs) + ErrMemberNotFound = Error(ErrGRPCMemberNotFound) + + ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) + ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) + + ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist) + ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist) + ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist) + ErrUserEmpty = Error(ErrGRPCUserEmpty) + ErrUserNotFound = Error(ErrGRPCUserNotFound) + ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist) + ErrRoleNotFound = Error(ErrGRPCRoleNotFound) + ErrAuthFailed = Error(ErrGRPCAuthFailed) + ErrPermissionDenied = Error(ErrGRPCPermissionDenied) + ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) + ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) + ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) + ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) + ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) + + ErrNoLeader = Error(ErrGRPCNoLeader) + ErrNotCapable = Error(ErrGRPCNotCapable) + ErrStopped = Error(ErrGRPCStopped) + ErrTimeout = Error(ErrGRPCTimeout) + ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) + ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) + ErrUnhealthy = Error(ErrGRPCUnhealthy) +) + +// EtcdError defines gRPC server errors. +// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323) +type EtcdError struct { + code codes.Code + desc string +} + +// Code returns grpc/codes.Code. +// TODO: define clientv3/codes.Code. 
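+//
+// A minimal usage sketch (the recovery actions are illustrative, not
+// prescribed by this package): normalize a raw gRPC error with Error and
+// compare against the exported sentinels, which EtcdError values equal:
+//
+//	switch rpctypes.Error(err) {
+//	case rpctypes.ErrCompacted:
+//		// required revision was compacted; re-sync from the current revision
+//	case rpctypes.ErrNoSpace:
+//		// backend quota exceeded; compact and defragment before retrying
+//	}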
+func (e EtcdError) Code() codes.Code { + return e.code +} + +func (e EtcdError) Error() string { + return e.desc +} + +func Error(err error) error { + if err == nil { + return nil + } + verr, ok := errStringToError[grpc.ErrorDesc(err)] + if !ok { // not gRPC error + return err + } + return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)} +} + +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go new file mode 100644 index 000000000000..5c590e1aec99 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go @@ -0,0 +1,20 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +var ( + MetadataRequireLeaderKey = "hasleader" + MetadataHasLeader = "true" +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go new file mode 100644 index 000000000000..0be93c52b6f4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/apply.go @@ -0,0 +1,878 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "bytes" + "sort" + "time" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/mvccpb" + "github.com/coreos/etcd/pkg/types" + "github.com/gogo/protobuf/proto" + "golang.org/x/net/context" +) + +const ( + warnApplyDuration = 100 * time.Millisecond +) + +type applyResult struct { + resp proto.Message + err error + // physc signals the physical effect of the request has completed in addition + // to being logically reflected by the node. Currently only used for + // Compaction requests. 
+ physc <-chan struct{} +} + +// applierV3 is the interface for processing V3 raft messages +type applierV3 interface { + Apply(r *pb.InternalRaftRequest) *applyResult + + Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) + Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) + DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) + Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) + Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) + + LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) + LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) + + Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error) + + Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) + + AuthEnable() (*pb.AuthEnableResponse, error) + AuthDisable() (*pb.AuthDisableResponse, error) + + UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) + UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) + UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) + UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) + UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) + UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) + RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) + RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) + RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) + RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) + RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) + UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) + RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) +} + +type applierV3backend struct { + s *EtcdServer +} + +func (s *EtcdServer) newApplierV3() applierV3 { + return newAuthApplierV3( + s.AuthStore(), + newQuotaApplierV3(s, &applierV3backend{s}), + ) +} + +func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { + ar := &applyResult{} + + // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls + switch { + case r.Range != nil: + ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range) + case r.Put != nil: + ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put) + case r.DeleteRange != nil: + ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) + case r.Txn != nil: + ar.resp, ar.err = a.s.applyV3.Txn(r.Txn) + case r.Compaction != nil: + ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction) + case r.LeaseGrant != nil: + ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant) + case r.LeaseRevoke != nil: + ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke) + case r.Alarm != nil: + ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm) + case r.Authenticate != nil: + ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate) + case r.AuthEnable != nil: + ar.resp, ar.err = a.s.applyV3.AuthEnable() + case r.AuthDisable != nil: + ar.resp, ar.err = a.s.applyV3.AuthDisable() + case r.AuthUserAdd != nil: + ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd) + case r.AuthUserDelete != nil: + ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete) + case r.AuthUserChangePassword != nil: + ar.resp, ar.err = 
a.s.applyV3.UserChangePassword(r.AuthUserChangePassword) + case r.AuthUserGrantRole != nil: + ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole) + case r.AuthUserGet != nil: + ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet) + case r.AuthUserRevokeRole != nil: + ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole) + case r.AuthRoleAdd != nil: + ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd) + case r.AuthRoleGrantPermission != nil: + ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) + case r.AuthRoleGet != nil: + ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet) + case r.AuthRoleRevokePermission != nil: + ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) + case r.AuthRoleDelete != nil: + ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete) + case r.AuthUserList != nil: + ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList) + case r.AuthRoleList != nil: + ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList) + default: + panic("not implemented") + } + return ar +} + +func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) { + resp = &pb.PutResponse{} + resp.Header = &pb.ResponseHeader{} + + val, leaseID := p.Value, lease.LeaseID(p.Lease) + if txn == nil { + if leaseID != lease.NoLease { + if l := a.s.lessor.Lookup(leaseID); l == nil { + return nil, lease.ErrLeaseNotFound + } + } + txn = a.s.KV().Write() + defer txn.End() + } + + var rr *mvcc.RangeResult + if p.IgnoreValue || p.IgnoreLease || p.PrevKv { + rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } + } + if p.IgnoreValue || p.IgnoreLease { + if rr == nil || len(rr.KVs) == 0 { + // ignore_{lease,value} flag expects previous key-value pair + return nil, ErrKeyNotFound + } + } + if p.IgnoreValue { + val = rr.KVs[0].Value + } + if p.IgnoreLease { + leaseID = lease.LeaseID(rr.KVs[0].Lease) + } + if p.PrevKv { + if rr != nil && len(rr.KVs) != 0 { + resp.PrevKv = &rr.KVs[0] + } + } + + resp.Header.Revision = txn.Put(p.Key, val, leaseID) + return resp, nil +} + +func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + resp := &pb.DeleteRangeResponse{} + resp.Header = &pb.ResponseHeader{} + + if txn == nil { + txn = a.s.kv.Write() + defer txn.End() + } + + if isGteRange(dr.RangeEnd) { + dr.RangeEnd = []byte{} + } + + if dr.PrevKv { + rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } + if rr != nil { + for i := range rr.KVs { + resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) + } + } + } + + resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd) + return resp, nil +} + +func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { + resp := &pb.RangeResponse{} + resp.Header = &pb.ResponseHeader{} + + if txn == nil { + txn = a.s.kv.Read() + defer txn.End() + } + + if isGteRange(r.RangeEnd) { + r.RangeEnd = []byte{} + } + + limit := r.Limit + if r.SortOrder != pb.RangeRequest_NONE || + r.MinModRevision != 0 || r.MaxModRevision != 0 || + r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 { + // fetch everything; sort and truncate afterwards + limit = 0 + } + if limit > 0 { + // fetch one extra for 'more' flag + limit = limit + 1 + } + + ro := mvcc.RangeOptions{ + Limit: limit, + Rev: r.Revision, + Count: r.CountOnly, + } + + rr, err := txn.Range(r.Key, r.RangeEnd, ro) + if err != nil { 
+ return nil, err + } + + if r.MaxModRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision } + pruneKVs(rr, f) + } + if r.MinModRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision } + pruneKVs(rr, f) + } + if r.MaxCreateRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision } + pruneKVs(rr, f) + } + if r.MinCreateRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision } + pruneKVs(rr, f) + } + + sortOrder := r.SortOrder + if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE { + // Since current mvcc.Range implementation returns results + // sorted by keys in lexiographically ascending order, + // sort ASCEND by default only when target is not 'KEY' + sortOrder = pb.RangeRequest_ASCEND + } + if sortOrder != pb.RangeRequest_NONE { + var sorter sort.Interface + switch { + case r.SortTarget == pb.RangeRequest_KEY: + sorter = &kvSortByKey{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_VERSION: + sorter = &kvSortByVersion{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_CREATE: + sorter = &kvSortByCreate{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_MOD: + sorter = &kvSortByMod{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_VALUE: + sorter = &kvSortByValue{&kvSort{rr.KVs}} + } + switch { + case sortOrder == pb.RangeRequest_ASCEND: + sort.Sort(sorter) + case sortOrder == pb.RangeRequest_DESCEND: + sort.Sort(sort.Reverse(sorter)) + } + } + + if r.Limit > 0 && len(rr.KVs) > int(r.Limit) { + rr.KVs = rr.KVs[:r.Limit] + resp.More = true + } + + resp.Header.Revision = rr.Rev + resp.Count = int64(rr.Count) + for i := range rr.KVs { + if r.KeysOnly { + rr.KVs[i].Value = nil + } + resp.Kvs = append(resp.Kvs, &rr.KVs[i]) + } + return resp, nil +} + +func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { + isWrite := !isTxnReadonly(rt) + txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read()) + + reqs, ok := a.compareToOps(txn, rt) + if isWrite { + if err := a.checkRequestPut(txn, reqs); err != nil { + txn.End() + return nil, err + } + } + if err := checkRequestRange(txn, reqs); err != nil { + txn.End() + return nil, err + } + + resps := make([]*pb.ResponseOp, len(reqs)) + txnResp := &pb.TxnResponse{ + Responses: resps, + Succeeded: ok, + Header: &pb.ResponseHeader{}, + } + + // When executing mutable txn ops, etcd must hold the txn lock so + // readers do not see any intermediate results. Since writes are + // serialized on the raft loop, the revision in the read view will + // be the revision of the write txn. + if isWrite { + txn.End() + txn = a.s.KV().Write() + } + for i := range reqs { + resps[i] = a.applyUnion(txn, reqs[i]) + } + rev := txn.Rev() + if len(txn.Changes()) != 0 { + rev++ + } + txn.End() + + txnResp.Header.Revision = rev + return txnResp, nil +} + +func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) { + for _, c := range rt.Compare { + if !applyCompare(rv, c) { + return rt.Failure, false + } + } + return rt.Success, true +} + +// applyCompare applies the compare request. +// If the comparison succeeds, it returns true. Otherwise, returns false. 
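+// For example, a VALUE compare with result EQUAL on key "k" and operand "v"
+// succeeds only when "k" currently exists with value exactly "v"; a VALUE
+// compare against a missing key always fails (see the existence check below).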
+func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { + rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return false + } + var ckv mvccpb.KeyValue + if len(rr.KVs) != 0 { + ckv = rr.KVs[0] + } else { + // Use the zero value of ckv normally. However... + if c.Target == pb.Compare_VALUE { + // Always fail if we're comparing a value on a key that doesn't exist. + // We can treat non-existence as the empty set explicitly, such that + // even a key with a value of length 0 bytes is still a real key + // that was written that way + return false + } + } + + // -1 is less, 0 is equal, 1 is greater + var result int + switch c.Target { + case pb.Compare_VALUE: + tv, _ := c.TargetUnion.(*pb.Compare_Value) + if tv != nil { + result = bytes.Compare(ckv.Value, tv.Value) + } + case pb.Compare_CREATE: + tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision) + if tv != nil { + result = compareInt64(ckv.CreateRevision, tv.CreateRevision) + } + + case pb.Compare_MOD: + tv, _ := c.TargetUnion.(*pb.Compare_ModRevision) + if tv != nil { + result = compareInt64(ckv.ModRevision, tv.ModRevision) + } + case pb.Compare_VERSION: + tv, _ := c.TargetUnion.(*pb.Compare_Version) + if tv != nil { + result = compareInt64(ckv.Version, tv.Version) + } + } + + switch c.Result { + case pb.Compare_EQUAL: + return result == 0 + case pb.Compare_NOT_EQUAL: + return result != 0 + case pb.Compare_GREATER: + return result > 0 + case pb.Compare_LESS: + return result < 0 + } + return true +} + +func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp { + switch tv := union.Request.(type) { + case *pb.RequestOp_RequestRange: + if tv.RequestRange != nil { + resp, err := a.Range(txn, tv.RequestRange) + if err != nil { + plog.Panicf("unexpected error during txn: %v", err) + } + return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}} + } + case *pb.RequestOp_RequestPut: + if tv.RequestPut != nil { + resp, err := a.Put(txn, tv.RequestPut) + if err != nil { + plog.Panicf("unexpected error during txn: %v", err) + } + return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}} + } + case *pb.RequestOp_RequestDeleteRange: + if tv.RequestDeleteRange != nil { + resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) + if err != nil { + plog.Panicf("unexpected error during txn: %v", err) + } + return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}} + } + default: + // empty union + return nil + } + return nil + +} + +func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) { + resp := &pb.CompactionResponse{} + resp.Header = &pb.ResponseHeader{} + ch, err := a.s.KV().Compact(compaction.Revision) + if err != nil { + return nil, ch, err + } + // get the current revision. which key to get is not important. 
+ rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{}) + resp.Header.Revision = rr.Rev + return resp, ch, err +} + +func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) + resp := &pb.LeaseGrantResponse{} + if err == nil { + resp.ID = int64(l.ID) + resp.TTL = l.TTL() + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + err := a.s.lessor.Revoke(lease.LeaseID(lc.ID)) + return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err +} + +func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { + resp := &pb.AlarmResponse{} + oldCount := len(a.s.alarmStore.Get(ar.Alarm)) + + switch ar.Action { + case pb.AlarmRequest_GET: + resp.Alarms = a.s.alarmStore.Get(ar.Alarm) + case pb.AlarmRequest_ACTIVATE: + m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) + if m == nil { + break + } + resp.Alarms = append(resp.Alarms, m) + activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1 + if !activated { + break + } + + switch m.Alarm { + case pb.AlarmType_NOSPACE: + plog.Warningf("alarm raised %+v", m) + a.s.applyV3 = newApplierV3Capped(a) + default: + plog.Errorf("unimplemented alarm activation (%+v)", m) + } + case pb.AlarmRequest_DEACTIVATE: + m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) + if m == nil { + break + } + resp.Alarms = append(resp.Alarms, m) + deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0 + if !deactivated { + break + } + + switch m.Alarm { + case pb.AlarmType_NOSPACE: + plog.Infof("alarm disarmed %+v", ar) + a.s.applyV3 = a.s.newApplierV3() + default: + plog.Errorf("unimplemented alarm deactivation (%+v)", m) + } + default: + return nil, nil + } + return resp, nil +} + +type applierV3Capped struct { + applierV3 + q backendQuota +} + +// newApplierV3Capped creates an applyV3 that will reject Puts and transactions +// with Puts so that the number of keys in the store is capped. 
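+// The Alarm handler above installs it when a NOSPACE alarm activates and
+// restores the normal applier chain via newApplierV3 once the alarm is
+// deactivated.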
+func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } + +func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { + return nil, ErrNoSpace +} + +func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) { + if a.q.Cost(r) > 0 { + return nil, ErrNoSpace + } + return a.applierV3.Txn(r) +} + +func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + return nil, ErrNoSpace +} + +func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { + err := a.s.AuthStore().AuthEnable() + if err != nil { + return nil, err + } + return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil +} + +func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { + a.s.AuthStore().AuthDisable() + return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil +} + +func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { + ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) + resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + resp, err := a.s.AuthStore().UserAdd(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + resp, err := a.s.AuthStore().UserDelete(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + resp, err := a.s.AuthStore().UserChangePassword(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + resp, err := a.s.AuthStore().UserGrantRole(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + resp, err := a.s.AuthStore().UserGet(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + resp, err := a.s.AuthStore().UserRevokeRole(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + resp, err := a.s.AuthStore().RoleAdd(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + resp, err := a.s.AuthStore().RoleGrantPermission(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + resp, err := a.s.AuthStore().RoleGet(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + resp, err 
:= a.s.AuthStore().RoleRevokePermission(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+	resp, err := a.s.AuthStore().RoleDelete(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+	resp, err := a.s.AuthStore().UserList(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+	resp, err := a.s.AuthStore().RoleList(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
+}
+
+type quotaApplierV3 struct {
+	applierV3
+	q Quota
+}
+
+func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
+	return &quotaApplierV3{app, NewBackendQuota(s)}
+}
+
+func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
+	ok := a.q.Available(p)
+	resp, err := a.applierV3.Put(txn, p)
+	if err == nil && !ok {
+		err = ErrNoSpace
+	}
+	return resp, err
+}
+
+func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
+	ok := a.q.Available(rt)
+	resp, err := a.applierV3.Txn(rt)
+	if err == nil && !ok {
+		err = ErrNoSpace
+	}
+	return resp, err
+}
+
+func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+	ok := a.q.Available(lc)
+	resp, err := a.applierV3.LeaseGrant(lc)
+	if err == nil && !ok {
+		err = ErrNoSpace
+	}
+	return resp, err
+}
+
+type kvSort struct{ kvs []mvccpb.KeyValue }
+
+func (s *kvSort) Swap(i, j int) {
+	t := s.kvs[i]
+	s.kvs[i] = s.kvs[j]
+	s.kvs[j] = t
+}
+func (s *kvSort) Len() int { return len(s.kvs) }
+
+type kvSortByKey struct{ *kvSort }
+
+func (s *kvSortByKey) Less(i, j int) bool {
+	return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
+}
+
+type kvSortByVersion struct{ *kvSort }
+
+func (s *kvSortByVersion) Less(i, j int) bool {
+	return (s.kvs[i].Version - s.kvs[j].Version) < 0
+}
+
+type kvSortByCreate struct{ *kvSort }
+
+func (s *kvSortByCreate) Less(i, j int) bool {
+	return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
+}
+
+type kvSortByMod struct{ *kvSort }
+
+func (s *kvSortByMod) Less(i, j int) bool {
+	return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
+}
+
+type kvSortByValue struct{ *kvSort }
+
+func (s *kvSortByValue) Less(i, j int) bool {
+	return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
+}
+
+func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
+	for _, requ := range reqs {
+		tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
+		if !ok {
+			continue
+		}
+		preq := tv.RequestPut
+		if preq == nil {
+			continue
+		}
+		if preq.IgnoreValue || preq.IgnoreLease {
+			// expects previous key-value, error if not exist
+			rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{})
+			if err != nil {
+				return err
+			}
+			if rr == nil || len(rr.KVs) == 0 {
+				return ErrKeyNotFound
+			}
+		}
+		if lease.LeaseID(preq.Lease) == lease.NoLease {
+			continue
+		}
+		if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil {
+			return lease.ErrLeaseNotFound
+		}
+	}
+	return nil
+}
+
+func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
+	for _, requ := range reqs {
+		tv, ok := requ.Request.(*pb.RequestOp_RequestRange)
+		if !ok {
+			continue
+		}
+		greq := tv.RequestRange
+		if greq == nil || greq.Revision == 0 {
+			continue
+		}
+
+
if greq.Revision > rv.Rev() { + return mvcc.ErrFutureRev + } + if greq.Revision < rv.FirstRev() { + return mvcc.ErrCompacted + } + } + return nil +} + +func compareInt64(a, b int64) int { + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } +} + +// isGteRange determines if the range end is a >= range. This works around grpc +// sending empty byte strings as nil; >= is encoded in the range end as '\0'. +func isGteRange(rangeEnd []byte) bool { + return len(rangeEnd) == 1 && rangeEnd[0] == 0 +} + +func noSideEffect(r *pb.InternalRaftRequest) bool { + return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil +} + +func removeNeedlessRangeReqs(txn *pb.TxnRequest) { + f := func(ops []*pb.RequestOp) []*pb.RequestOp { + j := 0 + for i := 0; i < len(ops); i++ { + if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok { + continue + } + ops[j] = ops[i] + j++ + } + + return ops[:j] + } + + txn.Success = f(txn.Success) + txn.Failure = f(txn.Failure) +} + +func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) { + j := 0 + for i := range rr.KVs { + rr.KVs[j] = rr.KVs[i] + if !isPrunable(&rr.KVs[i]) { + j++ + } + } + rr.KVs = rr.KVs[:j] +} + +func newHeader(s *EtcdServer) *pb.ResponseHeader { + return &pb.ResponseHeader{ + ClusterId: uint64(s.Cluster().ID()), + MemberId: uint64(s.ID()), + Revision: s.KV().Rev(), + RaftTerm: s.Term(), + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go new file mode 100644 index 000000000000..7da4ae45df5d --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go @@ -0,0 +1,196 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
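// (editor's sketch, not part of the vendored diff: removeNeedlessRangeReqs
// and pruneKVs above share the same stable in-place filter idiom, packing
// survivors at the front of the slice and re-slicing, with no allocation.
// A minimal, self-contained illustration; the ints and the keep predicate
// are invented for the example:)
//
//	package main
//
//	import "fmt"
//
//	func filterInPlace(xs []int, keep func(int) bool) []int {
//		j := 0
//		for i := range xs {
//			xs[j] = xs[i] // copy the candidate down; j advances only when it is kept
//			if keep(xs[i]) {
//				j++
//			}
//		}
//		return xs[:j] // same backing array, shorter length
//	}
//
//	func main() {
//		evens := filterInPlace([]int{1, 2, 3, 4, 5}, func(x int) bool { return x%2 == 0 })
//		fmt.Println(evens) // prints [2 4]
//	}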
+ +package etcdserver + +import ( + "sync" + + "github.com/coreos/etcd/auth" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc" +) + +type authApplierV3 struct { + applierV3 + as auth.AuthStore + + // mu serializes Apply so that user isn't corrupted and so that + // serialized requests don't leak data from TOCTOU errors + mu sync.Mutex + + authInfo auth.AuthInfo +} + +func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 { + return &authApplierV3{applierV3: base, as: as} +} + +func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult { + aa.mu.Lock() + defer aa.mu.Unlock() + if r.Header != nil { + // backward-compatible with pre-3.0 releases when internalRaftRequest + // does not have header field + aa.authInfo.Username = r.Header.Username + aa.authInfo.Revision = r.Header.AuthRevision + } + if needAdminPermission(r) { + if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil { + aa.authInfo.Username = "" + aa.authInfo.Revision = 0 + return &applyResult{err: err} + } + } + ret := aa.applierV3.Apply(r) + aa.authInfo.Username = "" + aa.authInfo.Revision = 0 + return ret +} + +func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) { + if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { + return nil, err + } + if r.PrevKv { + err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil) + if err != nil { + return nil, err + } + } + return aa.applierV3.Put(txn, r) +} + +func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { + if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { + return nil, err + } + return aa.applierV3.Range(txn, r) +} + +func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { + return nil, err + } + if r.PrevKv { + err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd) + if err != nil { + return nil, err + } + } + + return aa.applierV3.DeleteRange(txn, r) +} + +func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { + for _, requ := range reqs { + switch tv := requ.Request.(type) { + case *pb.RequestOp_RequestRange: + if tv.RequestRange == nil { + continue + } + + if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil { + return err + } + + case *pb.RequestOp_RequestPut: + if tv.RequestPut == nil { + continue + } + + if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil { + return err + } + + case *pb.RequestOp_RequestDeleteRange: + if tv.RequestDeleteRange == nil { + continue + } + + if tv.RequestDeleteRange.PrevKv { + err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) + if err != nil { + return err + } + } + + err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) + if err != nil { + return err + } + } + } + + return nil +} + +func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { + for _, c := range rt.Compare { + if err := as.IsRangePermitted(ai, c.Key, nil); err != nil { + return err + } + } + if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { + return err + } + if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil { + return err + } + return nil +} + +func (aa *authApplierV3) Txn(rt 
*pb.TxnRequest) (*pb.TxnResponse, error) { + if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil { + return nil, err + } + return aa.applierV3.Txn(rt) +} + +func needAdminPermission(r *pb.InternalRaftRequest) bool { + switch { + case r.AuthEnable != nil: + return true + case r.AuthDisable != nil: + return true + case r.AuthUserAdd != nil: + return true + case r.AuthUserDelete != nil: + return true + case r.AuthUserChangePassword != nil: + return true + case r.AuthUserGrantRole != nil: + return true + case r.AuthUserGet != nil: + return true + case r.AuthUserRevokeRole != nil: + return true + case r.AuthRoleAdd != nil: + return true + case r.AuthRoleGrantPermission != nil: + return true + case r.AuthRoleGet != nil: + return true + case r.AuthRoleRevokePermission != nil: + return true + case r.AuthRoleDelete != nil: + return true + case r.AuthUserList != nil: + return true + case r.AuthRoleList != nil: + return true + default: + return false + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go new file mode 100644 index 000000000000..f278efca88e5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go @@ -0,0 +1,140 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
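// (editor's sketch, not part of the vendored diff: the applier types in the
// two files above are decorators over a base applierV3. The concrete wiring
// happens in etcd's server startup code, which this diff does not include;
// under that assumption, the layering implied by the constructors is:)
//
//	// base applies entries to the backend; the quota layer turns writes
//	// into ErrNoSpace once the backend is over quota; the auth layer
//	// checks permissions before delegating inward.
//	func newAppliers(s *EtcdServer, base applierV3) applierV3 {
//		quota := newQuotaApplierV3(s, base)
//		return newAuthApplierV3(s.AuthStore(), quota)
//	}
//
// Because each wrapper embeds the applierV3 it decorates, any method it does
// not override (for example Range on quotaApplierV3) falls through to the
// next layer.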
+ +package etcdserver + +import ( + "encoding/json" + "path" + "time" + + "github.com/coreos/etcd/etcdserver/api" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/store" + "github.com/coreos/go-semver/semver" +) + +// ApplierV2 is the interface for processing V2 raft messages +type ApplierV2 interface { + Delete(r *pb.Request) Response + Post(r *pb.Request) Response + Put(r *pb.Request) Response + QGet(r *pb.Request) Response + Sync(r *pb.Request) Response +} + +func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 { + return &applierV2store{store: s, cluster: c} +} + +type applierV2store struct { + store store.Store + cluster *membership.RaftCluster +} + +func (a *applierV2store) Delete(r *pb.Request) Response { + switch { + case r.PrevIndex > 0 || r.PrevValue != "": + return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex)) + default: + return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive)) + } +} + +func (a *applierV2store) Post(r *pb.Request) Response { + return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, toTTLOptions(r))) +} + +func (a *applierV2store) Put(r *pb.Request) Response { + ttlOptions := toTTLOptions(r) + exists, existsSet := pbutil.GetBool(r.PrevExist) + switch { + case existsSet: + if exists { + if r.PrevIndex == 0 && r.PrevValue == "" { + return toResponse(a.store.Update(r.Path, r.Val, ttlOptions)) + } + return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) + } + return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions)) + case r.PrevIndex > 0 || r.PrevValue != "": + return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) + default: + if storeMemberAttributeRegexp.MatchString(r.Path) { + id := membership.MustParseMemberIDFromKey(path.Dir(r.Path)) + var attr membership.Attributes + if err := json.Unmarshal([]byte(r.Val), &attr); err != nil { + plog.Panicf("unmarshal %s should never fail: %v", r.Val, err) + } + if a.cluster != nil { + a.cluster.UpdateAttributes(id, attr) + } + // return an empty response since there is no consumer. + return Response{} + } + if r.Path == membership.StoreClusterVersionKey() { + if a.cluster != nil { + a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability) + } + // return an empty response since there is no consumer. 
+ return Response{} + } + return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions)) + } +} + +func (a *applierV2store) QGet(r *pb.Request) Response { + return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted)) +} + +func (a *applierV2store) Sync(r *pb.Request) Response { + a.store.DeleteExpiredKeys(time.Unix(0, r.Time)) + return Response{} +} + +// applyV2Request interprets r as a call to store.X and returns a Response interpreted +// from store.Event +func (s *EtcdServer) applyV2Request(r *pb.Request) Response { + toTTLOptions(r) + switch r.Method { + case "POST": + return s.applyV2.Post(r) + case "PUT": + return s.applyV2.Put(r) + case "DELETE": + return s.applyV2.Delete(r) + case "QGET": + return s.applyV2.QGet(r) + case "SYNC": + return s.applyV2.Sync(r) + default: + // This should never be reached, but just in case: + return Response{err: ErrUnknownMethod} + } +} + +func toTTLOptions(r *pb.Request) store.TTLOptionSet { + refresh, _ := pbutil.GetBool(r.Refresh) + ttlOptions := store.TTLOptionSet{Refresh: refresh} + if r.Expiration != 0 { + ttlOptions.ExpireTime = time.Unix(0, r.Expiration) + } + return ttlOptions +} + +func toResponse(ev *store.Event, err error) Response { + return Response{Event: ev, err: err} +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go new file mode 100644 index 000000000000..c5e2dabf3e71 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/backend.go @@ -0,0 +1,81 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "fmt" + "os" + "time" + + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" +) + +func newBackend(cfg *ServerConfig) backend.Backend { + bcfg := backend.DefaultBackendConfig() + bcfg.Path = cfg.backendPath() + if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { + // permit 10% excess over quota for disarm + bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) + } + return backend.New(bcfg) +} + +// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. +func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) { + snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) + if err != nil { + return nil, fmt.Errorf("database snapshot file path error: %v", err) + } + if err := os.Rename(snapPath, cfg.backendPath()); err != nil { + return nil, fmt.Errorf("rename snapshot file error: %v", err) + } + return openBackend(cfg), nil +} + +// openBackend returns a backend using the current etcd db. 
+func openBackend(cfg *ServerConfig) backend.Backend { + fn := cfg.backendPath() + beOpened := make(chan backend.Backend) + go func() { + beOpened <- newBackend(cfg) + }() + select { + case be := <-beOpened: + return be + case <-time.After(time.Second): + plog.Warningf("another etcd process is using %q and holds the file lock.", fn) + plog.Warningf("waiting for it to exit before starting...") + } + return <-beOpened +} + +// recoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes +// before updating the backend db after persisting raft snapshot to disk, +// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this +// case, replace the db with the snapshot db sent by the leader. +func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) { + var cIndex consistentIndex + kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex) + defer kv.Close() + if snapshot.Metadata.Index <= kv.ConsistentIndex() { + return oldbe, nil + } + oldbe.Close() + return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go new file mode 100644 index 000000000000..f44862a46380 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go @@ -0,0 +1,258 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "sort" + "time" + + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/version" + "github.com/coreos/go-semver/semver" +) + +// isMemberBootstrapped tries to check if the given member has been bootstrapped +// in the given cluster. +func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool { + rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt) + if err != nil { + return false + } + id := cl.MemberByName(member).ID + m := rcl.Member(id) + if m == nil { + return false + } + if len(m.ClientURLs) > 0 { + return true + } + return false +} + +// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and +// attempts to construct a Cluster by accessing the members endpoint on one of +// these URLs. The first URL to provide a response is used. If no URLs provide +// a response, or a Cluster cannot be successfully created from a received +// response, an error is returned. +// Each request has a 10-second timeout. Because the upper limit of TTL is 5s, +// 10 seconds is enough for building the connection and finishing the request.
+func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) { + return getClusterFromRemotePeers(urls, 10*time.Second, true, rt) +} + +// If logerr is true, it prints out more error messages. +func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) { + cc := &http.Client{ + Transport: rt, + Timeout: timeout, + } + for _, u := range urls { + resp, err := cc.Get(u + "/members") + if err != nil { + if logerr { + plog.Warningf("could not get cluster response from %s: %v", u, err) + } + continue + } + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + if logerr { + plog.Warningf("could not read the body of cluster response: %v", err) + } + continue + } + var membs []*membership.Member + if err = json.Unmarshal(b, &membs); err != nil { + if logerr { + plog.Warningf("could not unmarshal cluster response: %v", err) + } + continue + } + id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID")) + if err != nil { + if logerr { + plog.Warningf("could not parse the cluster ID from cluster res: %v", err) + } + continue + } + + // Only build a cluster from a response that actually contains members; + // an empty member list would produce an invalid, empty raft cluster, so + // report that case as a failure instead. + if len(membs) > 0 { + return membership.NewClusterFromMembers("", id, membs), nil + } + + return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.") + } + return nil, fmt.Errorf("could not retrieve cluster information from the given urls") +} + +// getRemotePeerURLs returns the peer URLs of remote members in the cluster. The +// returned list is sorted in ascending lexicographical order. +func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { + us := make([]string, 0) + for _, m := range cl.Members() { + if m.Name == local { + continue + } + us = append(us, m.PeerURLs...) + } + sort.Strings(us) + return us +} + +// getVersions returns the versions of the members in the given cluster. +// The key of the returned map is the member's ID. The value of the returned map +// is the semver versions string, including server and cluster. +// If it fails to get the version of a member, the value will be nil. +func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { + members := cl.Members() + vers := make(map[string]*version.Versions) + for _, m := range members { + if m.ID == local { + cv := "not_decided" + if cl.Version() != nil { + cv = cl.Version().String() + } + vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv} + continue + } + ver, err := getVersion(m, rt) + if err != nil { + plog.Warningf("cannot get the version of member %s (%v)", m.ID, err) + vers[m.ID.String()] = nil + } else { + vers[m.ID.String()] = ver + } + } + return vers +} + +// decideClusterVersion decides the cluster version based on the versions map. +// The returned version is the min server version in the map, or nil if the min +// version is unknown.
+func decideClusterVersion(vers map[string]*version.Versions) *semver.Version { + var cv *semver.Version + lv := semver.Must(semver.NewVersion(version.Version)) + + for mid, ver := range vers { + if ver == nil { + return nil + } + v, err := semver.NewVersion(ver.Server) + if err != nil { + plog.Errorf("cannot understand the version of member %s (%v)", mid, err) + return nil + } + if lv.LessThan(*v) { + plog.Warningf("the local etcd version %s is not up-to-date", lv.String()) + plog.Warningf("member %s has a higher version %s", mid, ver.Server) + } + if cv == nil { + cv = v + } else if v.LessThan(*cv) { + cv = v + } + } + return cv +} + +// isCompatibleWithCluster returns true if the local member has a compatible version with +// the currently running cluster. +// The version is considered compatible when at least one of the other members in the cluster has a +// cluster version in the range of [MinClusterVersion, Version] and no known member has a cluster version +// out of the range. +// We set this rule because, when the local member joins, another member might be offline. +func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { + vers := getVersions(cl, local, rt) + minV := semver.Must(semver.NewVersion(version.MinClusterVersion)) + maxV := semver.Must(semver.NewVersion(version.Version)) + maxV = &semver.Version{ + Major: maxV.Major, + Minor: maxV.Minor, + } + + return isCompatibleWithVers(vers, local, minV, maxV) +} + +func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool { + var ok bool + for id, v := range vers { + // ignore comparison with local version + if id == local.String() { + continue + } + if v == nil { + continue + } + clusterv, err := semver.NewVersion(v.Cluster) + if err != nil { + plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err) + continue + } + if clusterv.LessThan(*minV) { + plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String()) + return false + } + if maxV.LessThan(*clusterv) { + plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String()) + return false + } + ok = true + } + return ok +} + +// getVersion returns the Versions of the given member via its +// peerURLs. Returns the last error if it fails to get the version.
+func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) { + cc := &http.Client{ + Transport: rt, + } + var ( + err error + resp *http.Response + ) + + for _, u := range m.PeerURLs { + resp, err = cc.Get(u + "/version") + if err != nil { + plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) + continue + } + var b []byte + b, err = ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err) + continue + } + var vers version.Versions + if err = json.Unmarshal(b, &vers); err != nil { + plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err) + continue + } + return &vers, nil + } + return nil, err +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go new file mode 100644 index 000000000000..ae8a4d08e355 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/config.go @@ -0,0 +1,209 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "fmt" + "path/filepath" + "sort" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/coreos/etcd/pkg/netutil" + "github.com/coreos/etcd/pkg/transport" + "github.com/coreos/etcd/pkg/types" +) + +// ServerConfig holds the configuration of etcd as taken from the command line or discovery. +type ServerConfig struct { + Name string + DiscoveryURL string + DiscoveryProxy string + ClientURLs types.URLs + PeerURLs types.URLs + DataDir string + // DedicatedWALDir config will make the etcd to write the WAL to the WALDir + // rather than the dataDir/member/wal. + DedicatedWALDir string + SnapCount uint64 + MaxSnapFiles uint + MaxWALFiles uint + InitialPeerURLsMap types.URLsMap + InitialClusterToken string + NewCluster bool + ForceNewCluster bool + PeerTLSInfo transport.TLSInfo + + TickMs uint + ElectionTicks int + BootstrapTimeout time.Duration + + AutoCompactionRetention int + QuotaBackendBytes int64 + + // MaxRequestBytes is the maximum request size to send over raft. + MaxRequestBytes uint + + StrictReconfigCheck bool + + // ClientCertAuthEnabled is true when cert has been signed by the client CA. + ClientCertAuthEnabled bool + + AuthToken string + + Debug bool +} + +// VerifyBootstrap sanity-checks the initial config for bootstrap case +// and returns an error for things that should never happen. 
+func (c *ServerConfig) VerifyBootstrap() error { + if err := c.hasLocalMember(); err != nil { + return err + } + if err := c.advertiseMatchesCluster(); err != nil { + return err + } + if checkDuplicateURL(c.InitialPeerURLsMap) { + return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) + } + if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" { + return fmt.Errorf("initial cluster unset and no discovery URL found") + } + return nil +} + +// VerifyJoinExisting sanity-checks the initial config for join existing cluster +// case and returns an error for things that should never happen. +func (c *ServerConfig) VerifyJoinExisting() error { + // The member has announced its peer urls to the cluster before starting; no need to + // set the configuration again. + if err := c.hasLocalMember(); err != nil { + return err + } + if checkDuplicateURL(c.InitialPeerURLsMap) { + return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) + } + if c.DiscoveryURL != "" { + return fmt.Errorf("discovery URL should not be set when joining existing initial cluster") + } + return nil +} + +// hasLocalMember checks that the cluster at least contains the local server. +func (c *ServerConfig) hasLocalMember() error { + if urls := c.InitialPeerURLsMap[c.Name]; urls == nil { + return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name) + } + return nil +} + +// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list. +func (c *ServerConfig) advertiseMatchesCluster() error { + urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice() + urls.Sort() + sort.Strings(apurls) + ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer cancel() + if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) { + umap := map[string]types.URLs{c.Name: c.PeerURLs} + return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ",")) + } + return nil +} + +func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") } + +func (c *ServerConfig) WALDir() string { + if c.DedicatedWALDir != "" { + return c.DedicatedWALDir + } + return filepath.Join(c.MemberDir(), "wal") +} + +func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") } + +func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" } + +// ReqTimeout returns timeout for request to finish. 
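// (editor's note, not in the upstream source: assuming etcd's usual defaults
// of TickMs=100 and ElectionTicks=10, i.e. a 1s election timeout, this works
// out to 5s + 2*1s = 7s.)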
+func (c *ServerConfig) ReqTimeout() time.Duration { + // 5s for queue waiting, computation and disk IO delay + // + 2 * election timeout for possible leader election + return 5*time.Second + 2*time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond +} + +func (c *ServerConfig) electionTimeout() time.Duration { + return time.Duration(c.ElectionTicks) * time.Duration(c.TickMs) * time.Millisecond +} + +func (c *ServerConfig) peerDialTimeout() time.Duration { + // 1s for queue wait and system delay + // + one RTT, which is smaller than 1/5 election timeout + return time.Second + time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond/5 +} + +func (c *ServerConfig) PrintWithInitial() { c.print(true) } + +func (c *ServerConfig) Print() { c.print(false) } + +func (c *ServerConfig) print(initial bool) { + plog.Infof("name = %s", c.Name) + if c.ForceNewCluster { + plog.Infof("force new cluster") + } + plog.Infof("data dir = %s", c.DataDir) + plog.Infof("member dir = %s", c.MemberDir()) + if c.DedicatedWALDir != "" { + plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir) + } + plog.Infof("heartbeat = %dms", c.TickMs) + plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs)) + plog.Infof("snapshot count = %d", c.SnapCount) + if len(c.DiscoveryURL) != 0 { + plog.Infof("discovery URL= %s", c.DiscoveryURL) + if len(c.DiscoveryProxy) != 0 { + plog.Infof("discovery proxy = %s", c.DiscoveryProxy) + } + } + plog.Infof("advertise client URLs = %s", c.ClientURLs) + if initial { + plog.Infof("initial advertise peer URLs = %s", c.PeerURLs) + plog.Infof("initial cluster = %s", c.InitialPeerURLsMap) + } +} + +func checkDuplicateURL(urlsmap types.URLsMap) bool { + um := make(map[string]bool) + for _, urls := range urlsmap { + for _, url := range urls { + u := url.String() + if um[u] { + return true + } + um[u] = true + } + } + return false +} + +func (c *ServerConfig) bootstrapTimeout() time.Duration { + if c.BootstrapTimeout != 0 { + return c.BootstrapTimeout + } + return time.Second +} + +func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") } diff --git a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go b/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go new file mode 100644 index 000000000000..d513f6708d33 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go @@ -0,0 +1,33 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "sync/atomic" +) + +// consistentIndex represents the offset of an entry in a consistent replica log. +// It implements the mvcc.ConsistentIndexGetter interface. +// It is always set to the offset of current entry before executing the entry, +// so ConsistentWatchableKV could get the consistent index from it. 
+type consistentIndex uint64 + +func (i *consistentIndex) setConsistentIndex(v uint64) { + atomic.StoreUint64((*uint64)(i), v) +} + +func (i *consistentIndex) ConsistentIndex() uint64 { + return atomic.LoadUint64((*uint64)(i)) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/doc.go b/vendor/github.com/coreos/etcd/etcdserver/doc.go new file mode 100644 index 000000000000..b195d2d167a6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package etcdserver defines how etcd servers interact and store their states. +package etcdserver diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go new file mode 100644 index 000000000000..ed749dbe8d88 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/errors.go @@ -0,0 +1,46 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
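// (editor's sketch, not part of the vendored diff: consistentIndex, defined
// a few files above, is a bare uint64 guarded by atomic loads and stores so
// that the apply loop can publish the current entry offset to concurrent
// readers without a mutex. A minimal, self-contained version of the same
// pattern; the type name and the value 42 are invented for the example:)
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	type index uint64
//
//	func (i *index) set(v uint64) { atomic.StoreUint64((*uint64)(i), v) }
//	func (i *index) get() uint64  { return atomic.LoadUint64((*uint64)(i)) }
//
//	func main() {
//		var ci index
//		ci.set(42) // the single writer stores the offset before applying the entry
//		fmt.Println(ci.get()) // readers may load concurrently, lock-free
//	}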
+ +package etcdserver + +import ( + "errors" + "fmt" +) + +var ( + ErrUnknownMethod = errors.New("etcdserver: unknown method") + ErrStopped = errors.New("etcdserver: server stopped") + ErrCanceled = errors.New("etcdserver: request cancelled") + ErrTimeout = errors.New("etcdserver: request timed out") + ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure") + ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost") + ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") + ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") + ErrNoLeader = errors.New("etcdserver: no leader") + ErrRequestTooLarge = errors.New("etcdserver: request is too large") + ErrNoSpace = errors.New("etcdserver: no space") + ErrTooManyRequests = errors.New("etcdserver: too many requests") + ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") + ErrKeyNotFound = errors.New("etcdserver: key not found") +) + +type DiscoveryError struct { + Op string + Err error +} + +func (e DiscoveryError) Error() string { + return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD new file mode 100644 index 000000000000..672f3c515a56 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "etcdserver.pb.go", + "raft_internal.pb.go", + "rpc.pb.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/etcdserverpb", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go new file mode 100644 index 000000000000..aabf90061f6e --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -0,0 +1,1045 @@ +// Code generated by protoc-gen-gogo. +// source: etcdserver.proto +// DO NOT EDIT! + +/* + Package etcdserverpb is a generated protocol buffer package. 
+ + It is generated from these files: + etcdserver.proto + raft_internal.proto + rpc.proto + + It has these top-level messages: + Request + Metadata + RequestHeader + InternalRaftRequest + EmptyResponse + InternalAuthenticateRequest + ResponseHeader + RangeRequest + RangeResponse + PutRequest + PutResponse + DeleteRangeRequest + DeleteRangeResponse + RequestOp + ResponseOp + Compare + TxnRequest + TxnResponse + CompactionRequest + CompactionResponse + HashRequest + HashResponse + SnapshotRequest + SnapshotResponse + WatchRequest + WatchCreateRequest + WatchCancelRequest + WatchResponse + LeaseGrantRequest + LeaseGrantResponse + LeaseRevokeRequest + LeaseRevokeResponse + LeaseKeepAliveRequest + LeaseKeepAliveResponse + LeaseTimeToLiveRequest + LeaseTimeToLiveResponse + Member + MemberAddRequest + MemberAddResponse + MemberRemoveRequest + MemberRemoveResponse + MemberUpdateRequest + MemberUpdateResponse + MemberListRequest + MemberListResponse + DefragmentRequest + DefragmentResponse + AlarmRequest + AlarmMember + AlarmResponse + StatusRequest + StatusResponse + AuthEnableRequest + AuthDisableRequest + AuthenticateRequest + AuthUserAddRequest + AuthUserGetRequest + AuthUserDeleteRequest + AuthUserChangePasswordRequest + AuthUserGrantRoleRequest + AuthUserRevokeRoleRequest + AuthRoleAddRequest + AuthRoleGetRequest + AuthUserListRequest + AuthRoleListRequest + AuthRoleDeleteRequest + AuthRoleGrantPermissionRequest + AuthRoleRevokePermissionRequest + AuthEnableResponse + AuthDisableResponse + AuthenticateResponse + AuthUserAddResponse + AuthUserGetResponse + AuthUserDeleteResponse + AuthUserChangePasswordResponse + AuthUserGrantRoleResponse + AuthUserRevokeRoleResponse + AuthRoleAddResponse + AuthRoleGetResponse + AuthRoleListResponse + AuthUserListResponse + AuthRoleDeleteResponse + AuthRoleGrantPermissionResponse + AuthRoleRevokePermissionResponse +*/ +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Request struct { + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` + Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` + Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` + Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` + PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` + PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` + PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` + Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` + Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` + Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` + Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` + Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` + Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` + Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` + Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` + Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} } + +type Metadata struct { + NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` + ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} } + +func init() { + proto.RegisterType((*Request)(nil), "etcdserverpb.Request") + proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") +} +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x12 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) + i += copy(dAtA[i:], m.Method) + dAtA[i] = 0x1a + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x22 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) + i += copy(dAtA[i:], m.Val) + dAtA[i] = 0x28 + i++ + if m.Dir { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x32 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) + i += copy(dAtA[i:], m.PrevValue) + dAtA[i] = 0x38 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) + if m.PrevExist != nil { + dAtA[i] = 0x40 + i++ + if *m.PrevExist { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + dAtA[i] = 0x48 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) + dAtA[i] = 0x50 + i++ + if m.Wait { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) + dAtA[i] = 0x60 + i++ 
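// (editor's note, not part of the generated source: each field above is
// introduced by a protobuf tag varint, (fieldNumber<<3)|wireType. So 0x8 is
// field 1 (ID) with wire type 0 (varint), 0x12 is field 2 (Method) with wire
// type 2 (length-delimited), and the 0x60 just written is (12<<3)|0 for the
// Recursive bool that follows. Field 17 (Refresh) needs two tag bytes,
// 0x88 0x1, because its tag value 136 no longer fits in seven bits.)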
+ if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x68 + i++ + if m.Sorted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x70 + i++ + if m.Quorum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x78 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if m.Stream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.Refresh != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + if *m.Refresh { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Metadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Etcdserver(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Etcdserver(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Request) Size() (n int) { + var l int + _ = l + n += 1 + sovEtcdserver(uint64(m.ID)) + l = len(m.Method) + n += 1 + l + sovEtcdserver(uint64(l)) + l = len(m.Path) + n += 1 + l + sovEtcdserver(uint64(l)) + l = len(m.Val) + n += 1 + l + sovEtcdserver(uint64(l)) + n += 2 + l = len(m.PrevValue) + n += 1 + l + sovEtcdserver(uint64(l)) + n += 1 + sovEtcdserver(uint64(m.PrevIndex)) + if m.PrevExist != nil { + n += 2 + } + n += 1 + sovEtcdserver(uint64(m.Expiration)) + n += 2 + n += 1 + sovEtcdserver(uint64(m.Since)) + n += 2 + n += 2 + n += 2 + n += 1 + sovEtcdserver(uint64(m.Time)) + n += 3 + if m.Refresh != nil { + n += 3 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metadata) Size() (n int) { + var l int + _ = l + n += 1 + sovEtcdserver(uint64(m.NodeID)) + n += 1 + sovEtcdserver(uint64(m.ClusterID)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEtcdserver(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEtcdserver(x uint64) (n int) { + return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break 
+ } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Method = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Dir = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevValue = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType) + } + m.PrevIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PrevIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PrevExist = &b + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType) + } + m.Expiration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Expiration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Wait = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + m.Since = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Since |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Sorted = bool(v != 0) + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Quorum = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + m.Time = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Time |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + 
if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stream = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Refresh = &b + default: + iNdEx = preIndex + skippy, err := skipEtcdserver(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEtcdserver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) + } + m.ClusterID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEtcdserver(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEtcdserver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEtcdserver(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEtcdserver + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEtcdserver(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } + +var fileDescriptorEtcdserver = []byte{ + // 380 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, + 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, + 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, + 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, + 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, + 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, + 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, + 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, + 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, + 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, + 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, + 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, + 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 
0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, + 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, + 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, + 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, + 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, + 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, + 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, + 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, + 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, + 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, + 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto new file mode 100644 index 000000000000..25e0aca5d9f7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto @@ -0,0 +1,34 @@ +syntax = "proto2"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message Request { + optional uint64 ID = 1 [(gogoproto.nullable) = false]; + optional string Method = 2 [(gogoproto.nullable) = false]; + optional string Path = 3 [(gogoproto.nullable) = false]; + optional string Val = 4 [(gogoproto.nullable) = false]; + optional bool Dir = 5 [(gogoproto.nullable) = false]; + optional string PrevValue = 6 [(gogoproto.nullable) = false]; + optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false]; + optional bool PrevExist = 8 [(gogoproto.nullable) = true]; + optional int64 Expiration = 9 [(gogoproto.nullable) = false]; + optional bool Wait = 10 [(gogoproto.nullable) = false]; + optional uint64 Since = 11 [(gogoproto.nullable) = false]; + optional bool Recursive = 12 [(gogoproto.nullable) = false]; + optional bool Sorted = 13 [(gogoproto.nullable) = false]; + optional bool Quorum = 14 [(gogoproto.nullable) = false]; + optional int64 Time = 15 [(gogoproto.nullable) = false]; + optional bool Stream = 16 [(gogoproto.nullable) = false]; + optional bool Refresh = 17 [(gogoproto.nullable) = true]; +} + +message Metadata { + optional uint64 NodeID = 1 [(gogoproto.nullable) = false]; + optional uint64 ClusterID = 2 [(gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go new file mode 100644 index 000000000000..44a3b6f69eb9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -0,0 +1,2094 @@ +// Code generated by protoc-gen-gogo. +// source: raft_internal.proto +// DO NOT EDIT! + +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. 
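+// A minimal worked example of the varint wire format that skipEtcdserver
+// above, skipRaftInternal below, and every generated Unmarshal in this file
+// implement (purely illustrative; the trace is not part of the generated
+// code): each byte carries seven payload bits, least-significant group
+// first, and the high bit marks continuation. Decoding the bytes 0x96 0x01
+// therefore proceeds as
+//
+//	b = 0x96: wire |= (0x96 & 0x7F) << 0 -> 0x16; high bit set, keep reading
+//	b = 0x01: wire |= (0x01 & 0x7F) << 7 -> 0x96 (150); high bit clear, stop
+//
+// which is why the loops shift by 7 per byte and treat shift >= 64 as an
+// integer overflow: a uint64 holds at most ten 7-bit groups.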
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RequestHeader struct { + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // username is a username that is associated with an auth token of gRPC connection + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + // auth_revision is a revision number of auth.authStore. It is not related to mvcc + AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` +} + +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} +func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} } + +// An InternalRaftRequest is the union of all requests which can be +// sent via raft. +type InternalRaftRequest struct { + Header *RequestHeader `protobuf:"bytes,100,opt,name=header" json:"header,omitempty"` + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"` + Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` + Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"` + DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange" json:"delete_range,omitempty"` + Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"` + Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"` + LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant" json:"lease_grant,omitempty"` + LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke" json:"lease_revoke,omitempty"` + Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm" json:"alarm,omitempty"` + AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable" json:"auth_enable,omitempty"` + AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable" json:"auth_disable,omitempty"` + Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate" json:"authenticate,omitempty"` + AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd" json:"auth_user_add,omitempty"` + AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete" json:"auth_user_delete,omitempty"` + AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet" json:"auth_user_get,omitempty"` + AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword" json:"auth_user_change_password,omitempty"` + AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole" json:"auth_user_grant_role,omitempty"` + AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole" json:"auth_user_revoke_role,omitempty"` + AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList" json:"auth_user_list,omitempty"` + AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList" json:"auth_role_list,omitempty"` + AuthRoleAdd *AuthRoleAddRequest 
`protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd" json:"auth_role_add,omitempty"` + AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete" json:"auth_role_delete,omitempty"` + AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet" json:"auth_role_get,omitempty"` + AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission" json:"auth_role_grant_permission,omitempty"` + AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission" json:"auth_role_revoke_permission,omitempty"` +} + +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} } + +type EmptyResponse struct { +} + +func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } +func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } +func (*EmptyResponse) ProtoMessage() {} +func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{2} } + +// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? +// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. +// For avoiding misusage the field, we have an internal version of AuthenticateRequest. +type InternalAuthenticateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // simple_token is generated in API layer (etcdserver/v3_server.go) + SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` +} + +func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } +func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*InternalAuthenticateRequest) ProtoMessage() {} +func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRaftInternal, []int{3} +} + +func init() { + proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") + proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") + proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") + proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") +} +func (m *RequestHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) + } + if len(m.Username) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + } + if m.AuthRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) + } + return i, nil +} + +func (m *InternalRaftRequest) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) + } + if m.V2 != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.V2.Size())) + n1, err := m.V2.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Range != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Range.Size())) + n2, err := m.Range.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Put != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Put.Size())) + n3, err := m.Put.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.DeleteRange != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.DeleteRange.Size())) + n4, err := m.DeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Txn != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Txn.Size())) + n5, err := m.Txn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.Compaction != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Compaction.Size())) + n6, err := m.Compaction.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.LeaseGrant != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseGrant.Size())) + n7, err := m.LeaseGrant.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.LeaseRevoke != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseRevoke.Size())) + n8, err := m.LeaseRevoke.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Alarm != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Alarm.Size())) + n9, err := m.Alarm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.Header != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x6 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size())) + n10, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.AuthEnable != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x3e + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthEnable.Size())) + n11, err := m.AuthEnable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.AuthDisable != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthDisable.Size())) + n12, err := m.AuthDisable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Authenticate != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Authenticate.Size())) + n13, err := m.Authenticate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.AuthUserAdd != nil { + dAtA[i] = 0xe2 + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserAdd.Size())) + n14, err := m.AuthUserAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.AuthUserDelete != nil { + dAtA[i] = 0xea + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, 
uint64(m.AuthUserDelete.Size())) + n15, err := m.AuthUserDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.AuthUserGet != nil { + dAtA[i] = 0xf2 + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGet.Size())) + n16, err := m.AuthUserGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.AuthUserChangePassword != nil { + dAtA[i] = 0xfa + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserChangePassword.Size())) + n17, err := m.AuthUserChangePassword.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.AuthUserGrantRole != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGrantRole.Size())) + n18, err := m.AuthUserGrantRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.AuthUserRevokeRole != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserRevokeRole.Size())) + n19, err := m.AuthUserRevokeRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.AuthUserList != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserList.Size())) + n20, err := m.AuthUserList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.AuthRoleList != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleList.Size())) + n21, err := m.AuthRoleList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.AuthRoleAdd != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleAdd.Size())) + n22, err := m.AuthRoleAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if m.AuthRoleDelete != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleDelete.Size())) + n23, err := m.AuthRoleDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.AuthRoleGet != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGet.Size())) + n24, err := m.AuthRoleGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.AuthRoleGrantPermission != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGrantPermission.Size())) + n25, err := m.AuthRoleGrantPermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if m.AuthRoleRevokePermission != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleRevokePermission.Size())) + n26, err := m.AuthRoleRevokePermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + return i, nil +} + +func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + if len(m.SimpleToken) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) + i += copy(dAtA[i:], m.SimpleToken) + } + return i, nil +} + +func encodeFixed64RaftInternal(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32RaftInternal(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RequestHeader) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaftInternal(uint64(m.ID)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRevision != 0 { + n += 1 + sovRaftInternal(uint64(m.AuthRevision)) + } + return n +} + +func (m *InternalRaftRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaftInternal(uint64(m.ID)) + } + if m.V2 != nil { + l = m.V2.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Range != nil { + l = m.Range.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Put != nil { + l = m.Put.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.DeleteRange != nil { + l = m.DeleteRange.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Txn != nil { + l = m.Txn.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Compaction != nil { + l = m.Compaction.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseGrant != nil { + l = m.LeaseGrant.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseRevoke != nil { + l = m.LeaseRevoke.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Alarm != nil { + l = m.Alarm.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Header != nil { + l = m.Header.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthEnable != nil { + l = m.AuthEnable.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthDisable != nil { + l = m.AuthDisable.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.Authenticate != nil { + l = m.Authenticate.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserAdd != nil { + l = m.AuthUserAdd.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserDelete != nil { + l = m.AuthUserDelete.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserGet != nil { + l = m.AuthUserGet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserChangePassword != nil { + l = m.AuthUserChangePassword.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserGrantRole != nil { + l = m.AuthUserGrantRole.Size() + 
n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserRevokeRole != nil { + l = m.AuthUserRevokeRole.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserList != nil { + l = m.AuthUserList.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleList != nil { + l = m.AuthRoleList.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleAdd != nil { + l = m.AuthRoleAdd.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleDelete != nil { + l = m.AuthRoleDelete.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleGet != nil { + l = m.AuthRoleGet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleGrantPermission != nil { + l = m.AuthRoleGrantPermission.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleRevokePermission != nil { + l = m.AuthRoleRevokePermission.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + return n +} + +func (m *EmptyResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *InternalAuthenticateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + l = len(m.SimpleToken) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + return n +} + +func sovRaftInternal(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaftInternal(x uint64) (n int) { + return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RequestHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) + } + m.AuthRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AuthRevision |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.V2 == nil { + m.V2 = &Request{} + } + if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Range == nil { + m.Range = &RangeRequest{} + } + if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Put == nil { + m.Put = &PutRequest{} + } + if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteRange == nil { + m.DeleteRange = &DeleteRangeRequest{} + } + if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Txn == nil { + m.Txn = &TxnRequest{} + } + if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Compaction == nil { + m.Compaction = &CompactionRequest{} + } + if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseGrant == nil { + m.LeaseGrant = &LeaseGrantRequest{} + } + if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseRevoke == nil { + m.LeaseRevoke = &LeaseRevokeRequest{} + } + if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Alarm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Alarm == nil { + m.Alarm = &AlarmRequest{} + } + if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1000: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthEnable == nil { + m.AuthEnable = &AuthEnableRequest{} + } + if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1011: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthDisable == nil { + m.AuthDisable = &AuthDisableRequest{} + } + if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1012: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Authenticate == nil { + m.Authenticate = &InternalAuthenticateRequest{} + } + if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserAdd == nil { + m.AuthUserAdd = &AuthUserAddRequest{} + } + if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1101: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserDelete == nil { + m.AuthUserDelete = &AuthUserDeleteRequest{} + } + if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1102: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserGet == nil { + m.AuthUserGet = &AuthUserGetRequest{} + } + if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1103: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserChangePassword == nil { + m.AuthUserChangePassword = &AuthUserChangePasswordRequest{} + } + if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1104: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserGrantRole == nil { + m.AuthUserGrantRole = &AuthUserGrantRoleRequest{} + } + if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1105: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserRevokeRole == nil { + m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{} + } + if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1106: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserList == nil { + m.AuthUserList = &AuthUserListRequest{} + } + if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1107: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleList == nil { + m.AuthRoleList = &AuthRoleListRequest{} + } + if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1200: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleAdd == nil { + m.AuthRoleAdd = &AuthRoleAddRequest{} + } + if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1201: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleDelete == nil { + m.AuthRoleDelete = &AuthRoleDeleteRequest{} + } + if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + case 1202: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleGet == nil { + m.AuthRoleGet = &AuthRoleGetRequest{} + } + if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1203: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleGrantPermission == nil { + m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{} + } + if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1204: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleRevokePermission == nil { + m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{} + } + if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmptyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SimpleToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaftInternal(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaftInternal + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaftInternal(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) } + +var fileDescriptorRaftInternal = []byte{ + // 837 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, + 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f, + 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c, + 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2, + 0x0f, 0xd4, 0x17, 0x50, 0xbc, 0xf1, 0xca, 0x1b, 0x7d, 0x00, 0x67, 0x3f, 0x92, 0x34, 0x6d, 0xca, + 0x5d, 0x72, 0xce, 0xff, 0xfc, 0xce, 0xd9, 0xec, 0x7f, 0xbb, 0x45, 0xb3, 0x8c, 0x1e, 0x72, 0xd3, + 0x76, 0x39, 0x30, 0x97, 0x3a, 0xab, 0x3e, 0xf3, 0xb8, 0x87, 0x0b, 0xc0, 0x1b, 0x56, 0x00, 0xac, + 0x0b, 0xcc, 0x3f, 0x28, 0xcd, 0x35, 0xbd, 0xa6, 0x27, 0x13, 0xf7, 0xc4, 0x93, 0xd2, 0x94, 0x66, + 0x62, 0x8d, 0x8e, 0xe4, 0x98, 0xdf, 0x50, 0x8f, 0x95, 0x67, 0x68, 0xda, 0x80, 0x17, 0x1d, 0x08, + 0xf8, 0x16, 0x50, 0x0b, 0x18, 0x2e, 0xa2, 0xec, 0x76, 0x9d, 0x64, 0x16, 0x33, 0x2b, 0x63, 0x46, + 0x76, 0xbb, 0x8e, 0x4b, 0x68, 0xaa, 0x13, 0x88, 0x96, 0x6d, 0x20, 0xd9, 0xc5, 0xcc, 0x4a, 0xce, + 0x88, 0xde, 0xf1, 0x32, 0x9a, 0xa6, 0x1d, 0xde, 0x32, 0x19, 0x74, 0xed, 0xc0, 0xf6, 0x5c, 0x32, + 0x2a, 0xcb, 0x0a, 0x22, 0x68, 0xe8, 0x58, 0xe5, 0x4f, 0x11, 0xcd, 0x6e, 0xeb, 0xa9, 0x0d, 0x7a, + 0xc8, 0x75, 0xbb, 0x81, 0x46, 0xd7, 0x50, 0xb6, 0x5b, 0x95, 0x2d, 0xf2, 0xd5, 0xf9, 0xd5, 0xde, + 0x75, 0xad, 0xea, 0x12, 0x23, 0xdb, 0xad, 0xe2, 0xfb, 0x68, 0x9c, 0x51, 0xb7, 0x09, 0xb2, 0x57, + 0xbe, 0x5a, 0xea, 0x53, 0x8a, 0x54, 0x28, 0x57, 0x42, 0x7c, 0x0b, 0x8d, 0xfa, 0x1d, 0x4e, 0xc6, + 0xa4, 0x9e, 0x24, 0xf5, 0x7b, 0x9d, 0x70, 0x1e, 0x43, 0x88, 0xf0, 0x3a, 0x2a, 0x58, 0xe0, 0x00, + 0x07, 0x53, 0x35, 0x19, 0x97, 0x45, 0x8b, 0xc9, 0xa2, 0xba, 0x54, 0x24, 0x5a, 0xe5, 0xad, 0x38, + 0x26, 0x1a, 0xf2, 0x63, 0x97, 0x4c, 0xa4, 0x35, 0xdc, 0x3f, 0x76, 0xa3, 0x86, 0xfc, 0xd8, 0xc5, + 0x0f, 0x10, 0x6a, 0x78, 0x6d, 
0x9f, 0x36, 0xb8, 0xf8, 0x7e, 0x93, 0xb2, 0xe4, 0x6a, 0xb2, 0x64, + 0x3d, 0xca, 0x87, 0x95, 0x3d, 0x25, 0xf8, 0x21, 0xca, 0x3b, 0x40, 0x03, 0x30, 0x9b, 0x8c, 0xba, + 0x9c, 0x4c, 0xa5, 0x11, 0x76, 0x84, 0x60, 0x53, 0xe4, 0x23, 0x82, 0x13, 0x85, 0xc4, 0x9a, 0x15, + 0x81, 0x41, 0xd7, 0x3b, 0x02, 0x92, 0x4b, 0x5b, 0xb3, 0x44, 0x18, 0x52, 0x10, 0xad, 0xd9, 0x89, + 0x63, 0x62, 0x5b, 0xa8, 0x43, 0x59, 0x9b, 0xa0, 0xb4, 0x6d, 0xa9, 0x89, 0x54, 0xb4, 0x2d, 0x52, + 0x88, 0xd7, 0xd0, 0x44, 0x4b, 0x5a, 0x8e, 0x58, 0xb2, 0x64, 0x21, 0x75, 0xcf, 0x95, 0x2b, 0x0d, + 0x2d, 0xc5, 0x35, 0x94, 0x97, 0x8e, 0x03, 0x97, 0x1e, 0x38, 0x40, 0x7e, 0xa7, 0x7e, 0xb0, 0x5a, + 0x87, 0xb7, 0x36, 0xa4, 0x20, 0x5a, 0x2e, 0x8d, 0x42, 0xb8, 0x8e, 0xa4, 0x3f, 0x4d, 0xcb, 0x0e, + 0x24, 0xe3, 0xef, 0x64, 0xda, 0x7a, 0x05, 0xa3, 0xae, 0x14, 0xd1, 0x7a, 0x69, 0x1c, 0xc3, 0xbb, + 0x8a, 0x02, 0x2e, 0xb7, 0x1b, 0x94, 0x03, 0xf9, 0xa7, 0x28, 0x37, 0x93, 0x94, 0xd0, 0xf7, 0xb5, + 0x1e, 0x69, 0x88, 0x4b, 0xd4, 0xe3, 0x0d, 0x7d, 0x94, 0xc4, 0xd9, 0x32, 0xa9, 0x65, 0x91, 0x8f, + 0x53, 0xc3, 0xc6, 0x7a, 0x1c, 0x00, 0xab, 0x59, 0x56, 0x62, 0x2c, 0x1d, 0xc3, 0xbb, 0x68, 0x26, + 0xc6, 0x28, 0x4f, 0x92, 0x4f, 0x8a, 0xb4, 0x9c, 0x4e, 0xd2, 0x66, 0xd6, 0xb0, 0x22, 0x4d, 0x84, + 0x93, 0x63, 0x35, 0x81, 0x93, 0xcf, 0xe7, 0x8e, 0xb5, 0x09, 0x7c, 0x60, 0xac, 0x4d, 0xe0, 0xb8, + 0x89, 0xae, 0xc4, 0x98, 0x46, 0x4b, 0x9c, 0x12, 0xd3, 0xa7, 0x41, 0xf0, 0xd2, 0x63, 0x16, 0xf9, + 0xa2, 0x90, 0xb7, 0xd3, 0x91, 0xeb, 0x52, 0xbd, 0xa7, 0xc5, 0x21, 0xfd, 0x12, 0x4d, 0x4d, 0xe3, + 0x27, 0x68, 0xae, 0x67, 0x5e, 0x61, 0x6f, 0x93, 0x79, 0x0e, 0x90, 0x53, 0xd5, 0xe3, 0xfa, 0x90, + 0xb1, 0xe5, 0xd1, 0xf0, 0xe2, 0xad, 0xbe, 0x48, 0xfb, 0x33, 0xf8, 0x29, 0x9a, 0x8f, 0xc9, 0xea, + 0xa4, 0x28, 0xf4, 0x57, 0x85, 0xbe, 0x91, 0x8e, 0xd6, 0x47, 0xa6, 0x87, 0x8d, 0xe9, 0x40, 0x0a, + 0x6f, 0xa1, 0x62, 0x0c, 0x77, 0xec, 0x80, 0x93, 0x6f, 0x8a, 0xba, 0x94, 0x4e, 0xdd, 0xb1, 0x03, + 0x9e, 0xf0, 0x51, 0x18, 0x8c, 0x48, 0x62, 0x34, 0x45, 0xfa, 0x3e, 0x94, 0x24, 0x5a, 0x0f, 0x90, + 0xc2, 0x60, 0xb4, 0xf5, 0x92, 0x24, 0x1c, 0xf9, 0x26, 0x37, 0x6c, 0xeb, 0x45, 0x4d, 0xbf, 0x23, + 0x75, 0x2c, 0x72, 0xa4, 0xc4, 0x68, 0x47, 0xbe, 0xcd, 0x0d, 0x73, 0xa4, 0xa8, 0x4a, 0x71, 0x64, + 0x1c, 0x4e, 0x8e, 0x25, 0x1c, 0xf9, 0xee, 0xdc, 0xb1, 0xfa, 0x1d, 0xa9, 0x63, 0xf8, 0x39, 0x2a, + 0xf5, 0x60, 0xa4, 0x51, 0x7c, 0x60, 0x6d, 0x3b, 0x90, 0xf7, 0xd8, 0x7b, 0xc5, 0xbc, 0x33, 0x84, + 0x29, 0xe4, 0x7b, 0x91, 0x3a, 0xe4, 0x5f, 0xa6, 0xe9, 0x79, 0xdc, 0x46, 0x0b, 0x71, 0x2f, 0x6d, + 0x9d, 0x9e, 0x66, 0x1f, 0x54, 0xb3, 0xbb, 0xe9, 0xcd, 0x94, 0x4b, 0x06, 0xbb, 0x11, 0x3a, 0x44, + 0x50, 0xb9, 0x80, 0xa6, 0x37, 0xda, 0x3e, 0x7f, 0x65, 0x40, 0xe0, 0x7b, 0x6e, 0x00, 0x15, 0x1f, + 0x2d, 0x9c, 0xf3, 0x43, 0x84, 0x31, 0x1a, 0x93, 0xb7, 0x7b, 0x46, 0xde, 0xee, 0xf2, 0x59, 0xdc, + 0xfa, 0xd1, 0xf9, 0xd4, 0xb7, 0x7e, 0xf8, 0x8e, 0x97, 0x50, 0x21, 0xb0, 0xdb, 0xbe, 0x03, 0x26, + 0xf7, 0x8e, 0x40, 0x5d, 0xfa, 0x39, 0x23, 0xaf, 0x62, 0xfb, 0x22, 0xf4, 0x68, 0xee, 0xe4, 0x67, + 0x79, 0xe4, 0xe4, 0xac, 0x9c, 0x39, 0x3d, 0x2b, 0x67, 0x7e, 0x9c, 0x95, 0x33, 0xaf, 0x7f, 0x95, + 0x47, 0x0e, 0x26, 0xe4, 0x5f, 0x8e, 0xb5, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xc9, 0xfc, + 0x0e, 0xca, 0x08, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto new file mode 100644 index 000000000000..25d45d3c4f23 --- /dev/null +++ 
b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto @@ -0,0 +1,74 @@ +syntax = "proto3"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; +import "etcdserver.proto"; +import "rpc.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message RequestHeader { + uint64 ID = 1; + // username is the username associated with the auth token of the gRPC connection + string username = 2; + // auth_revision is the revision number of auth.authStore. It is not related to mvcc + uint64 auth_revision = 3; +} + +// An InternalRaftRequest is the union of all requests which can be +// sent via raft. +message InternalRaftRequest { + RequestHeader header = 100; + uint64 ID = 1; + + Request v2 = 2; + + RangeRequest range = 3; + PutRequest put = 4; + DeleteRangeRequest delete_range = 5; + TxnRequest txn = 6; + CompactionRequest compaction = 7; + + LeaseGrantRequest lease_grant = 8; + LeaseRevokeRequest lease_revoke = 9; + + AlarmRequest alarm = 10; + + AuthEnableRequest auth_enable = 1000; + AuthDisableRequest auth_disable = 1011; + + InternalAuthenticateRequest authenticate = 1012; + + AuthUserAddRequest auth_user_add = 1100; + AuthUserDeleteRequest auth_user_delete = 1101; + AuthUserGetRequest auth_user_get = 1102; + AuthUserChangePasswordRequest auth_user_change_password = 1103; + AuthUserGrantRoleRequest auth_user_grant_role = 1104; + AuthUserRevokeRoleRequest auth_user_revoke_role = 1105; + AuthUserListRequest auth_user_list = 1106; + AuthRoleListRequest auth_role_list = 1107; + + AuthRoleAddRequest auth_role_add = 1200; + AuthRoleDeleteRequest auth_role_delete = 1201; + AuthRoleGetRequest auth_role_get = 1202; + AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203; + AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204; +} + +message EmptyResponse { +} + +// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? +// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. +// To avoid misuse of that field, we keep an internal version of AuthenticateRequest. +message InternalAuthenticateRequest { + string name = 1; + string password = 2; + + // simple_token is generated in the API layer (etcdserver/v3_server.go) + string simple_token = 3; +} + diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go new file mode 100644 index 000000000000..894c815f824c --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -0,0 +1,17293 @@ +// Code generated by protoc-gen-gogo. +// source: rpc.proto +// DO NOT EDIT! + +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + + authpb "github.com/coreos/etcd/auth/authpb" + + _ "google.golang.org/genproto/googleapis/api/annotations" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used.
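+// A worked aside on the field numbers declared in raft_internal.proto above
+// (purely illustrative): the wire format tags a length-delimited field with
+// varint(field_number<<3 | 2), so the large numbers reserved for the auth
+// requests become exactly the two-byte tags that the generated MarshalTo in
+// raft_internal.pb.go writes out literally, e.g.
+//
+//	header        (field  100):  100<<3 | 2 =  802 -> bytes 0xa2 0x06
+//	auth_enable   (field 1000): 1000<<3 | 2 = 8002 -> bytes 0xc2 0x3e
+//	auth_role_add (field 1200): 1200<<3 | 2 = 9602 -> bytes 0x82 0x4b
+//
+// while fields 1 through 10 fit in a single tag byte (e.g. 0x12 for v2).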
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AlarmType int32 + +const ( + AlarmType_NONE AlarmType = 0 + AlarmType_NOSPACE AlarmType = 1 +) + +var AlarmType_name = map[int32]string{ + 0: "NONE", + 1: "NOSPACE", +} +var AlarmType_value = map[string]int32{ + "NONE": 0, + "NOSPACE": 1, +} + +func (x AlarmType) String() string { + return proto.EnumName(AlarmType_name, int32(x)) +} +func (AlarmType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } + +type RangeRequest_SortOrder int32 + +const ( + RangeRequest_NONE RangeRequest_SortOrder = 0 + RangeRequest_ASCEND RangeRequest_SortOrder = 1 + RangeRequest_DESCEND RangeRequest_SortOrder = 2 +) + +var RangeRequest_SortOrder_name = map[int32]string{ + 0: "NONE", + 1: "ASCEND", + 2: "DESCEND", +} +var RangeRequest_SortOrder_value = map[string]int32{ + "NONE": 0, + "ASCEND": 1, + "DESCEND": 2, +} + +func (x RangeRequest_SortOrder) String() string { + return proto.EnumName(RangeRequest_SortOrder_name, int32(x)) +} +func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 0} } + +type RangeRequest_SortTarget int32 + +const ( + RangeRequest_KEY RangeRequest_SortTarget = 0 + RangeRequest_VERSION RangeRequest_SortTarget = 1 + RangeRequest_CREATE RangeRequest_SortTarget = 2 + RangeRequest_MOD RangeRequest_SortTarget = 3 + RangeRequest_VALUE RangeRequest_SortTarget = 4 +) + +var RangeRequest_SortTarget_name = map[int32]string{ + 0: "KEY", + 1: "VERSION", + 2: "CREATE", + 3: "MOD", + 4: "VALUE", +} +var RangeRequest_SortTarget_value = map[string]int32{ + "KEY": 0, + "VERSION": 1, + "CREATE": 2, + "MOD": 3, + "VALUE": 4, +} + +func (x RangeRequest_SortTarget) String() string { + return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) +} +func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 1} } + +type Compare_CompareResult int32 + +const ( + Compare_EQUAL Compare_CompareResult = 0 + Compare_GREATER Compare_CompareResult = 1 + Compare_LESS Compare_CompareResult = 2 + Compare_NOT_EQUAL Compare_CompareResult = 3 +) + +var Compare_CompareResult_name = map[int32]string{ + 0: "EQUAL", + 1: "GREATER", + 2: "LESS", + 3: "NOT_EQUAL", +} +var Compare_CompareResult_value = map[string]int32{ + "EQUAL": 0, + "GREATER": 1, + "LESS": 2, + "NOT_EQUAL": 3, +} + +func (x Compare_CompareResult) String() string { + return proto.EnumName(Compare_CompareResult_name, int32(x)) +} +func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 0} } + +type Compare_CompareTarget int32 + +const ( + Compare_VERSION Compare_CompareTarget = 0 + Compare_CREATE Compare_CompareTarget = 1 + Compare_MOD Compare_CompareTarget = 2 + Compare_VALUE Compare_CompareTarget = 3 +) + +var Compare_CompareTarget_name = map[int32]string{ + 0: "VERSION", + 1: "CREATE", + 2: "MOD", + 3: "VALUE", +} +var Compare_CompareTarget_value = map[string]int32{ + "VERSION": 0, + "CREATE": 1, + "MOD": 2, + "VALUE": 3, +} + +func (x Compare_CompareTarget) String() string { + return proto.EnumName(Compare_CompareTarget_name, int32(x)) +} +func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} } + +type WatchCreateRequest_FilterType int32 + +const ( + // filter out put event. + WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0 + // filter out delete event. 
+ WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1 +) + +var WatchCreateRequest_FilterType_name = map[int32]string{ + 0: "NOPUT", + 1: "NODELETE", +} +var WatchCreateRequest_FilterType_value = map[string]int32{ + "NOPUT": 0, + "NODELETE": 1, +} + +func (x WatchCreateRequest_FilterType) String() string { + return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x)) +} +func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{19, 0} +} + +type AlarmRequest_AlarmAction int32 + +const ( + AlarmRequest_GET AlarmRequest_AlarmAction = 0 + AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1 + AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2 +) + +var AlarmRequest_AlarmAction_name = map[int32]string{ + 0: "GET", + 1: "ACTIVATE", + 2: "DEACTIVATE", +} +var AlarmRequest_AlarmAction_value = map[string]int32{ + "GET": 0, + "ACTIVATE": 1, + "DEACTIVATE": 2, +} + +func (x AlarmRequest_AlarmAction) String() string { + return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) +} +func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{41, 0} +} + +type ResponseHeader struct { + // cluster_id is the ID of the cluster which sent the response. + ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // member_id is the ID of the member which sent the response. + MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` + // revision is the key-value store revision when the request was applied. + Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` + // raft_term is the raft term when the request was applied. + RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` +} + +func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } +func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } +func (*ResponseHeader) ProtoMessage() {} +func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } + +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func (m *ResponseHeader) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *ResponseHeader) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + +type RangeRequest struct { + // key is the first key for the range. If range_end is not given, the request only looks up key. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the upper bound on the requested range [key, range_end). + // If range_end is '\0', the range is all keys >= key. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. + RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. + Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + // revision is the point-in-time of the key-value store to use for the range. 
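+	// An aside on the range_end rules above (an illustrative sketch, not part
+	// of the generated code): the "key plus one" prefix end can be computed by
+	// incrementing the last byte of the prefix that is below 0xff. The helper
+	// name prefixRangeEnd is hypothetical:
+	//
+	//	func prefixRangeEnd(prefix []byte) []byte {
+	//		end := make([]byte, len(prefix))
+	//		copy(end, prefix)
+	//		for i := len(end) - 1; i >= 0; i-- {
+	//			if end[i] < 0xff {
+	//				end[i]++
+	//				return end[:i+1]
+	//			}
+	//		}
+	//		return []byte{0} // '\0': all keys >= prefix when prefix is all 0xff
+	//	}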
+	// If revision is less than or equal to zero, the range is over the newest key-value store.
+	// If the revision has been compacted, ErrCompacted is returned as a response.
+	Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
+	// sort_order is the order for returned sorted results.
+	SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"`
+	// sort_target is the key-value field to use for sorting.
+	SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"`
+	// serializable sets the range request to use serializable member-local reads.
+	// Range requests are linearizable by default; linearizable requests have higher
+	// latency and lower throughput than serializable requests but reflect the current
+	// consensus of the cluster. For better performance, in exchange for possible stale reads,
+	// a serializable range request is served locally without needing to reach consensus
+	// with other nodes in the cluster.
+	Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"`
+	// keys_only when set returns only the keys and not the values.
+	KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"`
+	// count_only when set returns only the count of the keys in the range.
+	CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"`
+	// min_mod_revision is the lower bound for returned key mod revisions; all keys with
+	// lesser mod revisions will be filtered away.
+	MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"`
+	// max_mod_revision is the upper bound for returned key mod revisions; all keys with
+	// greater mod revisions will be filtered away.
+	MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"`
+	// min_create_revision is the lower bound for returned key create revisions; all keys with
+	// lesser create revisions will be filtered away.
+	MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"`
+	// max_create_revision is the upper bound for returned key create revisions; all keys with
+	// greater create revisions will be filtered away.
+ MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` +} + +func (m *RangeRequest) Reset() { *m = RangeRequest{} } +func (m *RangeRequest) String() string { return proto.CompactTextString(m) } +func (*RangeRequest) ProtoMessage() {} +func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } + +func (m *RangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *RangeRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RangeRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { + if m != nil { + return m.SortOrder + } + return RangeRequest_NONE +} + +func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { + if m != nil { + return m.SortTarget + } + return RangeRequest_KEY +} + +func (m *RangeRequest) GetSerializable() bool { + if m != nil { + return m.Serializable + } + return false +} + +func (m *RangeRequest) GetKeysOnly() bool { + if m != nil { + return m.KeysOnly + } + return false +} + +func (m *RangeRequest) GetCountOnly() bool { + if m != nil { + return m.CountOnly + } + return false +} + +func (m *RangeRequest) GetMinModRevision() int64 { + if m != nil { + return m.MinModRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxModRevision() int64 { + if m != nil { + return m.MaxModRevision + } + return 0 +} + +func (m *RangeRequest) GetMinCreateRevision() int64 { + if m != nil { + return m.MinCreateRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxCreateRevision() int64 { + if m != nil { + return m.MaxCreateRevision + } + return 0 +} + +type RangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // kvs is the list of key-value pairs matched by the range request. + // kvs is empty when count is requested. + Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"` + // more indicates if there are more keys to return in the requested range. + More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` + // count is set to the number of keys within the range when requested. + Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` +} + +func (m *RangeResponse) Reset() { *m = RangeResponse{} } +func (m *RangeResponse) String() string { return proto.CompactTextString(m) } +func (*RangeResponse) ProtoMessage() {} +func (*RangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} } + +func (m *RangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { + if m != nil { + return m.Kvs + } + return nil +} + +func (m *RangeResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +func (m *RangeResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +type PutRequest struct { + // key is the key, in bytes, to put into the key-value store. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // value is the value, in bytes, to associate with the key in the key-value store. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // lease is the lease ID to associate with the key in the key-value store. A lease + // value of 0 indicates no lease. + Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pair before changing it. + // The previous key-value pair will be returned in the put response. + PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } + +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *PutRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *PutRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *PutRequest) GetIgnoreValue() bool { + if m != nil { + return m.IgnoreValue + } + return false +} + +func (m *PutRequest) GetIgnoreLease() bool { + if m != nil { + return m.IgnoreLease + } + return false +} + +type PutResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // if prev_kv is set in the request, the previous key-value pair will be returned. + PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} } + +func (m *PutResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue { + if m != nil { + return m.PrevKv + } + return nil +} + +type DeleteRangeRequest struct { + // key is the first key to delete in the range. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the key following the last key to delete for the range [key, range_end). + // If range_end is not given, the range is defined to contain only the key argument. + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). + // If range_end is '\0', the range is all keys greater than or equal to the key argument. + RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. + // The previous key-value pairs will be returned in the delete response. 
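+	// An illustrative client-side use of prev_kv (a sketch; assumes the
+	// vendored github.com/coreos/etcd/clientv3 package; error handling elided):
+	//
+	//	import (
+	//		"context"
+	//
+	//		"github.com/coreos/etcd/clientv3"
+	//	)
+	//
+	//	func deleteWithPrevKV(cli *clientv3.Client, key string) {
+	//		// WithPrevKV asks the server to return the deleted key-value pairs.
+	//		resp, _ := cli.Delete(context.Background(), key, clientv3.WithPrevKV())
+	//		for _, kv := range resp.PrevKvs {
+	//			_ = kv // previous value of each deleted key
+	//		}
+	//	}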
+ PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` +} + +func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } +func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeRequest) ProtoMessage() {} +func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } + +func (m *DeleteRangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *DeleteRangeRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +type DeleteRangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // deleted is the number of keys deleted by the delete range request. + Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` + // if prev_kv is set in the request, the previous key-value pairs will be returned. + PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"` +} + +func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } +func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeResponse) ProtoMessage() {} +func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{6} } + +func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRangeResponse) GetDeleted() int64 { + if m != nil { + return m.Deleted + } + return 0 +} + +func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { + if m != nil { + return m.PrevKvs + } + return nil +} + +type RequestOp struct { + // request is a union of request types accepted by a transaction. 
+ // + // Types that are valid to be assigned to Request: + // *RequestOp_RequestRange + // *RequestOp_RequestPut + // *RequestOp_RequestDeleteRange + Request isRequestOp_Request `protobuf_oneof:"request"` +} + +func (m *RequestOp) Reset() { *m = RequestOp{} } +func (m *RequestOp) String() string { return proto.CompactTextString(m) } +func (*RequestOp) ProtoMessage() {} +func (*RequestOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} } + +type isRequestOp_Request interface { + isRequestOp_Request() + MarshalTo([]byte) (int, error) + Size() int +} + +type RequestOp_RequestRange struct { + RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,oneof"` +} +type RequestOp_RequestPut struct { + RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,oneof"` +} +type RequestOp_RequestDeleteRange struct { + RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"` +} + +func (*RequestOp_RequestRange) isRequestOp_Request() {} +func (*RequestOp_RequestPut) isRequestOp_Request() {} +func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} + +func (m *RequestOp) GetRequest() isRequestOp_Request { + if m != nil { + return m.Request + } + return nil +} + +func (m *RequestOp) GetRequestRange() *RangeRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok { + return x.RequestRange + } + return nil +} + +func (m *RequestOp) GetRequestPut() *PutRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok { + return x.RequestPut + } + return nil +} + +func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok { + return x.RequestDeleteRange + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
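+// An illustrative note on the oneof plumbing that follows (a sketch, not part
+// of the generated file): the varints the marshalers encode are protobuf field
+// keys, computed as (field_number << 3) | wire_type. With proto.WireBytes == 2:
+//
+//	key := uint64(3<<3 | 2) // field 3, length-delimited => 26 (0x1a)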
+func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{ + (*RequestOp_RequestRange)(nil), + (*RequestOp_RequestPut)(nil), + (*RequestOp_RequestDeleteRange)(nil), + } +} + +func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RequestOp) + // request + switch x := m.Request.(type) { + case *RequestOp_RequestRange: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestRange); err != nil { + return err + } + case *RequestOp_RequestPut: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestPut); err != nil { + return err + } + case *RequestOp_RequestDeleteRange: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestDeleteRange); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RequestOp.Request has unexpected type %T", x) + } + return nil +} + +func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RequestOp) + switch tag { + case 1: // request.request_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RangeRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestRange{msg} + return true, err + case 2: // request.request_put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PutRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestPut{msg} + return true, err + case 3: // request.request_delete_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteRangeRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestDeleteRange{msg} + return true, err + default: + return false, nil + } +} + +func _RequestOp_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RequestOp) + // request + switch x := m.Request.(type) { + case *RequestOp_RequestRange: + s := proto.Size(x.RequestRange) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RequestOp_RequestPut: + s := proto.Size(x.RequestPut) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RequestOp_RequestDeleteRange: + s := proto.Size(x.RequestDeleteRange) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ResponseOp struct { + // response is a union of response types returned by a transaction. 
+ // + // Types that are valid to be assigned to Response: + // *ResponseOp_ResponseRange + // *ResponseOp_ResponsePut + // *ResponseOp_ResponseDeleteRange + Response isResponseOp_Response `protobuf_oneof:"response"` +} + +func (m *ResponseOp) Reset() { *m = ResponseOp{} } +func (m *ResponseOp) String() string { return proto.CompactTextString(m) } +func (*ResponseOp) ProtoMessage() {} +func (*ResponseOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{8} } + +type isResponseOp_Response interface { + isResponseOp_Response() + MarshalTo([]byte) (int, error) + Size() int +} + +type ResponseOp_ResponseRange struct { + ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,oneof"` +} +type ResponseOp_ResponsePut struct { + ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,oneof"` +} +type ResponseOp_ResponseDeleteRange struct { + ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"` +} + +func (*ResponseOp_ResponseRange) isResponseOp_Response() {} +func (*ResponseOp_ResponsePut) isResponseOp_Response() {} +func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} + +func (m *ResponseOp) GetResponse() isResponseOp_Response { + if m != nil { + return m.Response + } + return nil +} + +func (m *ResponseOp) GetResponseRange() *RangeResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok { + return x.ResponseRange + } + return nil +} + +func (m *ResponseOp) GetResponsePut() *PutResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok { + return x.ResponsePut + } + return nil +} + +func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok { + return x.ResponseDeleteRange + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{ + (*ResponseOp_ResponseRange)(nil), + (*ResponseOp_ResponsePut)(nil), + (*ResponseOp_ResponseDeleteRange)(nil), + } +} + +func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseOp) + // response + switch x := m.Response.(type) { + case *ResponseOp_ResponseRange: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseRange); err != nil { + return err + } + case *ResponseOp_ResponsePut: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponsePut); err != nil { + return err + } + case *ResponseOp_ResponseDeleteRange: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseOp.Response has unexpected type %T", x) + } + return nil +} + +func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseOp) + switch tag { + case 1: // response.response_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RangeResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseRange{msg} + return true, err + case 2: // response.response_put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PutResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponsePut{msg} + return true, err + case 3: // response.response_delete_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteRangeResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseDeleteRange{msg} + return true, err + default: + return false, nil + } +} + +func _ResponseOp_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResponseOp) + // response + switch x := m.Response.(type) { + case *ResponseOp_ResponseRange: + s := proto.Size(x.ResponseRange) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseOp_ResponsePut: + s := proto.Size(x.ResponsePut) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseOp_ResponseDeleteRange: + s := proto.Size(x.ResponseDeleteRange) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Compare struct { + // result is logical comparison operation for this comparison. + Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"` + // target is the key-value field to inspect for the comparison. + Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"` + // key is the subject key for the comparison operation. 
+ Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Types that are valid to be assigned to TargetUnion: + // *Compare_Version + // *Compare_CreateRevision + // *Compare_ModRevision + // *Compare_Value + TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` +} + +func (m *Compare) Reset() { *m = Compare{} } +func (m *Compare) String() string { return proto.CompactTextString(m) } +func (*Compare) ProtoMessage() {} +func (*Compare) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9} } + +type isCompare_TargetUnion interface { + isCompare_TargetUnion() + MarshalTo([]byte) (int, error) + Size() int +} + +type Compare_Version struct { + Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"` +} +type Compare_CreateRevision struct { + CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"` +} +type Compare_ModRevision struct { + ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"` +} +type Compare_Value struct { + Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"` +} + +func (*Compare_Version) isCompare_TargetUnion() {} +func (*Compare_CreateRevision) isCompare_TargetUnion() {} +func (*Compare_ModRevision) isCompare_TargetUnion() {} +func (*Compare_Value) isCompare_TargetUnion() {} + +func (m *Compare) GetTargetUnion() isCompare_TargetUnion { + if m != nil { + return m.TargetUnion + } + return nil +} + +func (m *Compare) GetResult() Compare_CompareResult { + if m != nil { + return m.Result + } + return Compare_EQUAL +} + +func (m *Compare) GetTarget() Compare_CompareTarget { + if m != nil { + return m.Target + } + return Compare_VERSION +} + +func (m *Compare) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Compare) GetVersion() int64 { + if x, ok := m.GetTargetUnion().(*Compare_Version); ok { + return x.Version + } + return 0 +} + +func (m *Compare) GetCreateRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok { + return x.CreateRevision + } + return 0 +} + +func (m *Compare) GetModRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok { + return x.ModRevision + } + return 0 +} + +func (m *Compare) GetValue() []byte { + if x, ok := m.GetTargetUnion().(*Compare_Value); ok { + return x.Value + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{ + (*Compare_Version)(nil), + (*Compare_CreateRevision)(nil), + (*Compare_ModRevision)(nil), + (*Compare_Value)(nil), + } +} + +func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Compare) + // target_union + switch x := m.TargetUnion.(type) { + case *Compare_Version: + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Version)) + case *Compare_CreateRevision: + _ = b.EncodeVarint(5<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.CreateRevision)) + case *Compare_ModRevision: + _ = b.EncodeVarint(6<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.ModRevision)) + case *Compare_Value: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + _ = b.EncodeRawBytes(x.Value) + case nil: + default: + return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x) + } + return nil +} + +func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Compare) + switch tag { + case 4: // target_union.version + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_Version{int64(x)} + return true, err + case 5: // target_union.create_revision + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_CreateRevision{int64(x)} + return true, err + case 6: // target_union.mod_revision + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_ModRevision{int64(x)} + return true, err + case 7: // target_union.value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.TargetUnion = &Compare_Value{x} + return true, err + default: + return false, nil + } +} + +func _Compare_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Compare) + // target_union + switch x := m.TargetUnion.(type) { + case *Compare_Version: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Version)) + case *Compare_CreateRevision: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CreateRevision)) + case *Compare_ModRevision: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.ModRevision)) + case *Compare_Value: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Value))) + n += len(x.Value) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// From google paxosdb paper: +// Our implementation hinges around a powerful primitive which we call MultiOp. All other database +// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically +// and consists of three components: +// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check +// for the absence or presence of a value, or compare with a given value. Two different tests in the guard +// may apply to the same or different entries in the database. All tests in the guard are applied and +// MultiOp returns the results. 
If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed if guard evaluates to
+// true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
+type TxnRequest struct {
+	// compare is a list of predicates representing a conjunction of terms.
+	// If the comparisons succeed, then the success requests will be processed in order,
+	// and the response will contain their respective responses in order.
+	// If the comparisons fail, then the failure requests will be processed in order,
+	// and the response will contain their respective responses in order.
+	Compare []*Compare `protobuf:"bytes,1,rep,name=compare" json:"compare,omitempty"`
+	// success is a list of requests which will be applied when compare evaluates to true.
+	Success []*RequestOp `protobuf:"bytes,2,rep,name=success" json:"success,omitempty"`
+	// failure is a list of requests which will be applied when compare evaluates to false.
+	Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure" json:"failure,omitempty"`
+}
+
+func (m *TxnRequest) Reset()                    { *m = TxnRequest{} }
+func (m *TxnRequest) String() string            { return proto.CompactTextString(m) }
+func (*TxnRequest) ProtoMessage()               {}
+func (*TxnRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{10} }
+
+func (m *TxnRequest) GetCompare() []*Compare {
+	if m != nil {
+		return m.Compare
+	}
+	return nil
+}
+
+func (m *TxnRequest) GetSuccess() []*RequestOp {
+	if m != nil {
+		return m.Success
+	}
+	return nil
+}
+
+func (m *TxnRequest) GetFailure() []*RequestOp {
+	if m != nil {
+		return m.Failure
+	}
+	return nil
+}
+
+type TxnResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	// succeeded is set to true if the compare evaluated to true, and to false otherwise.
+	Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"`
+	// responses is a list of responses corresponding to the results from applying
+	// success if succeeded is true or failure if succeeded is false.
+	Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"`
+}
+
+func (m *TxnResponse) Reset()                    { *m = TxnResponse{} }
+func (m *TxnResponse) String() string            { return proto.CompactTextString(m) }
+func (*TxnResponse) ProtoMessage()               {}
+func (*TxnResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{11} }
+
+func (m *TxnResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *TxnResponse) GetSucceeded() bool {
+	if m != nil {
+		return m.Succeeded
+	}
+	return false
+}
+
+func (m *TxnResponse) GetResponses() []*ResponseOp {
+	if m != nil {
+		return m.Responses
+	}
+	return nil
+}
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+type CompactionRequest struct {
+	// revision is the key-value store revision for the compaction operation.
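+	// An illustrative compare-and-swap built on the Txn types above (a sketch;
+	// assumes the vendored github.com/coreos/etcd/clientv3 package, which wraps
+	// these messages):
+	//
+	//	import (
+	//		"context"
+	//
+	//		"github.com/coreos/etcd/clientv3"
+	//	)
+	//
+	//	// compareAndSwap puts newVal only if key still holds oldVal.
+	//	func compareAndSwap(cli *clientv3.Client, key, oldVal, newVal string) (bool, error) {
+	//		resp, err := cli.Txn(context.Background()).
+	//			If(clientv3.Compare(clientv3.Value(key), "=", oldVal)).
+	//			Then(clientv3.OpPut(key, newVal)).
+	//			Else(clientv3.OpGet(key)).
+	//			Commit()
+	//		if err != nil {
+	//			return false, err
+	//		}
+	//		return resp.Succeeded, nil
+	//	}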
+ Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` + // physical is set so the RPC will wait until the compaction is physically + // applied to the local database such that compacted entries are totally + // removed from the backend database. + Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` +} + +func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } +func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } +func (*CompactionRequest) ProtoMessage() {} +func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } + +func (m *CompactionRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *CompactionRequest) GetPhysical() bool { + if m != nil { + return m.Physical + } + return false +} + +type CompactionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } +func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } +func (*CompactionResponse) ProtoMessage() {} +func (*CompactionResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{13} } + +func (m *CompactionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type HashRequest struct { +} + +func (m *HashRequest) Reset() { *m = HashRequest{} } +func (m *HashRequest) String() string { return proto.CompactTextString(m) } +func (*HashRequest) ProtoMessage() {} +func (*HashRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{14} } + +type HashResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // hash is the hash value computed from the responding member's key-value store. + Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *HashResponse) Reset() { *m = HashResponse{} } +func (m *HashResponse) String() string { return proto.CompactTextString(m) } +func (*HashResponse) ProtoMessage() {} +func (*HashResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{15} } + +func (m *HashResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *HashResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + +type SnapshotRequest struct { +} + +func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } +func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*SnapshotRequest) ProtoMessage() {} +func (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{16} } + +type SnapshotResponse struct { + // header has the current key-value store information. The first header in the snapshot + // stream indicates the point in time of the snapshot. + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // remaining_bytes is the number of blob bytes to be sent after this message + RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` + // blob contains the next chunk of the snapshot in the snapshot stream. 
+ Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` +} + +func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } +func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*SnapshotResponse) ProtoMessage() {} +func (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{17} } + +func (m *SnapshotResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SnapshotResponse) GetRemainingBytes() uint64 { + if m != nil { + return m.RemainingBytes + } + return 0 +} + +func (m *SnapshotResponse) GetBlob() []byte { + if m != nil { + return m.Blob + } + return nil +} + +type WatchRequest struct { + // request_union is a request to either create a new watcher or cancel an existing watcher. + // + // Types that are valid to be assigned to RequestUnion: + // *WatchRequest_CreateRequest + // *WatchRequest_CancelRequest + RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` +} + +func (m *WatchRequest) Reset() { *m = WatchRequest{} } +func (m *WatchRequest) String() string { return proto.CompactTextString(m) } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{18} } + +type isWatchRequest_RequestUnion interface { + isWatchRequest_RequestUnion() + MarshalTo([]byte) (int, error) + Size() int +} + +type WatchRequest_CreateRequest struct { + CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"` +} +type WatchRequest_CancelRequest struct { + CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"` +} + +func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} +func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {} + +func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion { + if m != nil { + return m.RequestUnion + } + return nil +} + +func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok { + return x.CreateRequest + } + return nil +} + +func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok { + return x.CancelRequest + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{ + (*WatchRequest_CreateRequest)(nil), + (*WatchRequest_CancelRequest)(nil), + } +} + +func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*WatchRequest) + // request_union + switch x := m.RequestUnion.(type) { + case *WatchRequest_CreateRequest: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CreateRequest); err != nil { + return err + } + case *WatchRequest_CancelRequest: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CancelRequest); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x) + } + return nil +} + +func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*WatchRequest) + switch tag { + case 1: // request_union.create_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(WatchCreateRequest) + err := b.DecodeMessage(msg) + m.RequestUnion = &WatchRequest_CreateRequest{msg} + return true, err + case 2: // request_union.cancel_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(WatchCancelRequest) + err := b.DecodeMessage(msg) + m.RequestUnion = &WatchRequest_CancelRequest{msg} + return true, err + default: + return false, nil + } +} + +func _WatchRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*WatchRequest) + // request_union + switch x := m.RequestUnion.(type) { + case *WatchRequest_CreateRequest: + s := proto.Size(x.CreateRequest) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *WatchRequest_CancelRequest: + s := proto.Size(x.CancelRequest) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type WatchCreateRequest struct { + // key is the key to register for watching. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the end of the range [key, range_end) to watch. If range_end is not given, + // only the key argument is watched. If range_end is equal to '\0', all keys greater than + // or equal to the key argument are watched. + // If the range_end is one bit larger than the given key, + // then all keys with the prefix (the given key) will be watched. + RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". + StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"` + // progress_notify is set so that the etcd server will periodically send a WatchResponse with + // no events to the new watcher if there are no recent events. It is useful when clients + // wish to recover a disconnected watcher starting from a recent known revision. + // The etcd server may decide how often it will send notifications based on current load. 
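+	// An illustrative use of the watch options described above (a sketch;
+	// assumes the vendored github.com/coreos/etcd/clientv3 package):
+	//
+	//	import (
+	//		"context"
+	//
+	//		"github.com/coreos/etcd/clientv3"
+	//	)
+	//
+	//	// watchPrefix streams events for every key under prefix.
+	//	func watchPrefix(cli *clientv3.Client, prefix string) {
+	//		ch := cli.Watch(context.Background(), prefix, clientv3.WithPrefix(), clientv3.WithPrevKV())
+	//		for wresp := range ch {
+	//			for _, ev := range wresp.Events {
+	//				_ = ev.Kv     // the event's key-value
+	//				_ = ev.PrevKv // previous key-value, present because of WithPrevKV
+	//			}
+	//		}
+	//	}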
+	ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
+	// filters filter the events on the server side before they are sent back to the watcher.
+	Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
+	// If prev_kv is set, the created watcher gets the previous KV before the event happens.
+	// If the previous KV is already compacted, nothing will be returned.
+	PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+}
+
+func (m *WatchCreateRequest) Reset()                    { *m = WatchCreateRequest{} }
+func (m *WatchCreateRequest) String() string            { return proto.CompactTextString(m) }
+func (*WatchCreateRequest) ProtoMessage()               {}
+func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} }
+
+func (m *WatchCreateRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetStartRevision() int64 {
+	if m != nil {
+		return m.StartRevision
+	}
+	return 0
+}
+
+func (m *WatchCreateRequest) GetProgressNotify() bool {
+	if m != nil {
+		return m.ProgressNotify
+	}
+	return false
+}
+
+func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
+	if m != nil {
+		return m.Filters
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetPrevKv() bool {
+	if m != nil {
+		return m.PrevKv
+	}
+	return false
+}
+
+type WatchCancelRequest struct {
+	// watch_id is the watcher id to cancel so that no more events are transmitted.
+	WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+}
+
+func (m *WatchCancelRequest) Reset()                    { *m = WatchCancelRequest{} }
+func (m *WatchCancelRequest) String() string            { return proto.CompactTextString(m) }
+func (*WatchCancelRequest) ProtoMessage()               {}
+func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} }
+
+func (m *WatchCancelRequest) GetWatchId() int64 {
+	if m != nil {
+		return m.WatchId
+	}
+	return 0
+}
+
+type WatchResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	// watch_id is the ID of the watcher that corresponds to the response.
+	WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+	// created is set to true if the response is for a create watch request.
+	// The client should record the watch_id and expect to receive events for
+	// the created watcher from the same stream.
+	// All events sent to the created watcher will carry the same watch_id.
+	Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
+	// canceled is set to true if the response is for a cancel watch request.
+	// No further events will be sent to the canceled watcher.
+	Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
+	// compact_revision is set to the minimum index if a watcher tries to watch
+	// at a compacted index.
+	//
+	// This happens when creating a watcher at a compacted revision or the watcher cannot
+	// catch up with the progress of the key-value store.
+	//
+	// The client should treat the watcher as canceled and should not try to create any
+	// watcher with the same start_revision again.
+ CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + // cancel_reason indicates the reason for canceling the watcher. + CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` +} + +func (m *WatchResponse) Reset() { *m = WatchResponse{} } +func (m *WatchResponse) String() string { return proto.CompactTextString(m) } +func (*WatchResponse) ProtoMessage() {} +func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{21} } + +func (m *WatchResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *WatchResponse) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +func (m *WatchResponse) GetCanceled() bool { + if m != nil { + return m.Canceled + } + return false +} + +func (m *WatchResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *WatchResponse) GetCancelReason() string { + if m != nil { + return m.CancelReason + } + return "" +} + +func (m *WatchResponse) GetEvents() []*mvccpb.Event { + if m != nil { + return m.Events + } + return nil +} + +type LeaseGrantRequest struct { + // TTL is the advisory time-to-live in seconds. + TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` + // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } +func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantRequest) ProtoMessage() {} +func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } + +func (m *LeaseGrantRequest) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseGrantResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID for the granted lease. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the server chosen lease time-to-live in seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } +func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantResponse) ProtoMessage() {} +func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{23} } + +func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseGrantResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseGrantResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type LeaseRevokeRequest struct { + // ID is the lease ID to revoke. 
When the ID is revoked, all associated keys will be deleted. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } +func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeRequest) ProtoMessage() {} +func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } + +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseRevokeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } +func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeResponse) ProtoMessage() {} +func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{25} } + +func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type LeaseKeepAliveRequest struct { + // ID is the lease ID for the lease to keep alive. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } +func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveRequest) ProtoMessage() {} +func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } + +func (m *LeaseKeepAliveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseKeepAliveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the new time-to-live for the lease. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` +} + +func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } +func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveResponse) ProtoMessage() {} +func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} } + +func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseKeepAliveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseKeepAliveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +type LeaseTimeToLiveRequest struct { + // ID is the lease ID for the lease. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // keys is true to query all the keys attached to this lease. 
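+	// An illustrative use of the lease API described above (a sketch; assumes
+	// the vendored github.com/coreos/etcd/clientv3 package; "svc/instance" and
+	// "addr" are made-up example values):
+	//
+	//	import (
+	//		"context"
+	//
+	//		"github.com/coreos/etcd/clientv3"
+	//	)
+	//
+	//	// registerWithLease attaches a key to a 10-second lease and keeps it alive.
+	//	func registerWithLease(cli *clientv3.Client) error {
+	//		lease, err := cli.Grant(context.Background(), 10) // TTL in seconds
+	//		if err != nil {
+	//			return err
+	//		}
+	//		if _, err := cli.Put(context.Background(), "svc/instance", "addr", clientv3.WithLease(lease.ID)); err != nil {
+	//			return err
+	//		}
+	//		_, err = cli.KeepAlive(context.Background(), lease.ID) // renews until the context is canceled
+	//		return err
+	//	}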
+ Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } +func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveRequest) ProtoMessage() {} +func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } + +func (m *LeaseTimeToLiveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveRequest) GetKeys() bool { + if m != nil { + return m.Keys + } + return false +} + +type LeaseTimeToLiveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` + // Keys is the list of keys attached to this lease. + Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } +func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveResponse) ProtoMessage() {} +func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } + +func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseTimeToLiveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { + if m != nil { + return m.GrantedTTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { + if m != nil { + return m.Keys + } + return nil +} + +type Member struct { + // ID is the member ID for this member. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // name is the human-readable name of the member. If the member is not started, the name will be an empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // peerURLs is the list of URLs the member exposes to the cluster for communication. + PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"` + // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. 
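+// Illustrative sketch, not part of the generated file: querying the
+// LeaseTimeToLive RPC above through the vendored clientv3 wrapper. Assumes
+// cli and grant from the earlier lease sketch.
+//
+//	ttl, err := cli.TimeToLive(context.Background(), grant.ID, clientv3.WithAttachedKeys())
+//	if err == nil {
+//		// TTL is the remaining seconds (negative once the lease is gone);
+//		// GrantedTTL is the TTL originally granted; Keys lists attached keys.
+//		fmt.Println(ttl.TTL, ttl.GrantedTTL, ttl.Keys)
+//	}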
+ ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"` +} + +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } + +func (m *Member) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *Member) GetClientURLs() []string { + if m != nil { + return m.ClientURLs + } + return nil +} + +type MemberAddRequest struct { + // peerURLs is the list of URLs the added member will use to communicate with the cluster. + PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` +} + +func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } +func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } +func (*MemberAddRequest) ProtoMessage() {} +func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } + +func (m *MemberAddRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +type MemberAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // member is the member information for the added member. + Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` + // members is a list of all members after adding the new member. + Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } +func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } +func (*MemberAddResponse) ProtoMessage() {} +func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} } + +func (m *MemberAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberAddResponse) GetMember() *Member { + if m != nil { + return m.Member + } + return nil +} + +func (m *MemberAddResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberRemoveRequest struct { + // ID is the member ID of the member to remove. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } +func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveRequest) ProtoMessage() {} +func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } + +func (m *MemberRemoveRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +type MemberRemoveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after removing the member. 
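+// Illustrative sketch, not part of the generated file: adding a member with
+// the Cluster API that the Member/MemberAdd types above describe, via the
+// vendored clientv3 wrapper. The peer URL is a placeholder; cli is assumed
+// from the earlier sketch.
+//
+//	addResp, err := cli.MemberAdd(context.Background(), []string{"http://10.0.0.4:2380"})
+//	if err == nil {
+//		fmt.Printf("added member %x; cluster now has %d members\n",
+//			addResp.Member.ID, len(addResp.Members))
+//	}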
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } +func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveResponse) ProtoMessage() {} +func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} } + +func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberRemoveResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberUpdateRequest struct { + // ID is the member ID of the member to update. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // peerURLs is the new list of URLs the member will use to communicate with the cluster. + PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs" json:"peerURLs,omitempty"` +} + +func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } +func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateRequest) ProtoMessage() {} +func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } + +func (m *MemberUpdateRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberUpdateRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +type MemberUpdateResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after updating the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } +func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateResponse) ProtoMessage() {} +func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} } + +func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberUpdateResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberListRequest struct { +} + +func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } +func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } +func (*MemberListRequest) ProtoMessage() {} +func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} } + +type MemberListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members associated with the cluster. 
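+// Illustrative sketch, not part of the generated file: listing members per
+// the MemberListRequest/Response above (cli assumed as before). Name and
+// ClientURLs are empty for members that have not started.
+//
+//	lst, err := cli.MemberList(context.Background())
+//	if err == nil {
+//		for _, m := range lst.Members {
+//			fmt.Printf("%x %s peers=%v clients=%v\n", m.ID, m.Name, m.PeerURLs, m.ClientURLs)
+//		}
+//	}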
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } +func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } +func (*MemberListResponse) ProtoMessage() {} +func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} } + +func (m *MemberListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberListResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type DefragmentRequest struct { +} + +func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } +func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } +func (*DefragmentRequest) ProtoMessage() {} +func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} } + +type DefragmentResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } +func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } +func (*DefragmentResponse) ProtoMessage() {} +func (*DefragmentResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} } + +func (m *DefragmentResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AlarmRequest struct { + // action is the kind of alarm request to issue. The action + // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a + // raised alarm. + Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"` + // memberID is the ID of the member associated with the alarm. If memberID is 0, the + // alarm request covers all members. + MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` + // alarm is the type of alarm to consider for this request. + Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` +} + +func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } +func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } +func (*AlarmRequest) ProtoMessage() {} +func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } + +func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { + if m != nil { + return m.Action + } + return AlarmRequest_GET +} + +func (m *AlarmRequest) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmRequest) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmMember struct { + // memberID is the ID of the member associated with the raised alarm. + MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` + // alarm is the type of alarm which has been raised. 
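+// Illustrative sketch, not part of the generated file: the GET and DEACTIVATE
+// alarm actions described above, via the vendored clientv3 Maintenance API
+// (cli assumed as before; error handling elided).
+//
+//	alarms, _ := cli.AlarmList(context.Background()) // action GET, all members
+//	for _, am := range alarms.Alarms {
+//		// am.MemberID names the member, am.Alarm the alarm type (e.g. NOSPACE).
+//		cli.AlarmDisarm(context.Background(), (*clientv3.AlarmMember)(am)) // DEACTIVATE
+//	}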
+ Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` +} + +func (m *AlarmMember) Reset() { *m = AlarmMember{} } +func (m *AlarmMember) String() string { return proto.CompactTextString(m) } +func (*AlarmMember) ProtoMessage() {} +func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } + +func (m *AlarmMember) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmMember) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // alarms is a list of alarms associated with the alarm request. + Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms" json:"alarms,omitempty"` +} + +func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } +func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } +func (*AlarmResponse) ProtoMessage() {} +func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } + +func (m *AlarmResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AlarmResponse) GetAlarms() []*AlarmMember { + if m != nil { + return m.Alarms + } + return nil +} + +type StatusRequest struct { +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } + +type StatusResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // version is the cluster protocol version used by the responding member. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // dbSize is the size of the backend database, in bytes, of the responding member. + DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` + // leader is the member ID which the responding member believes is the current leader. + Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` + // raftIndex is the current raft index of the responding member. + RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` + // raftTerm is the current raft term of the responding member. 
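+// Illustrative sketch, not part of the generated file: reading the
+// StatusResponse fields above via the vendored clientv3 Maintenance API;
+// the endpoint is a placeholder. Comparing Leader with a member's own ID is
+// how callers check leadership.
+//
+//	st, err := cli.Status(context.Background(), "127.0.0.1:2379")
+//	if err == nil {
+//		fmt.Printf("version=%s dbSize=%dB leader=%x raftIndex=%d raftTerm=%d\n",
+//			st.Version, st.DbSize, st.Leader, st.RaftIndex, st.RaftTerm)
+//	}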
+ RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } + +func (m *StatusResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *StatusResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *StatusResponse) GetDbSize() int64 { + if m != nil { + return m.DbSize + } + return 0 +} + +func (m *StatusResponse) GetLeader() uint64 { + if m != nil { + return m.Leader + } + return 0 +} + +func (m *StatusResponse) GetRaftIndex() uint64 { + if m != nil { + return m.RaftIndex + } + return 0 +} + +func (m *StatusResponse) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + +type AuthEnableRequest struct { +} + +func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } +func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthEnableRequest) ProtoMessage() {} +func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } + +type AuthDisableRequest struct { +} + +func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } +func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthDisableRequest) ProtoMessage() {} +func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } + +type AuthenticateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } +func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*AuthenticateRequest) ProtoMessage() {} +func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } + +func (m *AuthenticateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthenticateRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type AuthUserAddRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } +func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddRequest) ProtoMessage() {} +func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } + +func (m *AuthUserAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserAddRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type AuthUserGetRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } +func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetRequest) ProtoMessage() {} +func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } + +func (m *AuthUserGetRequest) 
GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthUserDeleteRequest struct { + // name is the name of the user to delete. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } +func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteRequest) ProtoMessage() {} +func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } + +func (m *AuthUserDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthUserChangePasswordRequest struct { + // name is the name of the user whose password is being changed. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // password is the new password for the user. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} } +func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserChangePasswordRequest) ProtoMessage() {} +func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{52} +} + +func (m *AuthUserChangePasswordRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type AuthUserGrantRoleRequest struct { + // user is the name of the user which should be granted a given role. + User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + // role is the name of the role to grant to the user. + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } +func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleRequest) ProtoMessage() {} +func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } + +func (m *AuthUserGrantRoleRequest) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AuthUserGrantRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthUserRevokeRoleRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } +func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleRequest) ProtoMessage() {} +func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } + +func (m *AuthUserRevokeRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserRevokeRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthRoleAddRequest struct { + // name is the name of the role to add to the authentication system. 
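+// Illustrative sketch, not part of the generated file: the user-management
+// requests above driven through the vendored clientv3 Auth API. Names and
+// passwords are placeholders; enabling auth assumes a root user already
+// exists, and error handling is elided.
+//
+//	cli.UserAdd(context.Background(), "alice", "s3cret")
+//	cli.UserGrantRole(context.Background(), "alice", "app-role")
+//	cli.AuthEnable(context.Background())
+//	// After enabling, clients set Username/Password in clientv3.Config and
+//	// the wrapper issues the Authenticate RPC to obtain the token for them.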
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } +func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddRequest) ProtoMessage() {} +func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } + +func (m *AuthRoleAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthRoleGetRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } +func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetRequest) ProtoMessage() {} +func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } + +func (m *AuthRoleGetRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthUserListRequest struct { +} + +func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } +func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserListRequest) ProtoMessage() {} +func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } + +type AuthRoleListRequest struct { +} + +func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } +func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListRequest) ProtoMessage() {} +func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } + +type AuthRoleDeleteRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } +func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleDeleteRequest) ProtoMessage() {} +func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } + +func (m *AuthRoleDeleteRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthRoleGrantPermissionRequest struct { + // name is the name of the role which will be granted the permission. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // perm is the permission to grant to the role. 
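+// Illustrative sketch, not part of the generated file: creating a role and
+// granting it a permission over a key range, matching the request types
+// above (names are placeholders). A prefix grant uses the prefix with its
+// last byte incremented as range_end, so "app/" pairs with "app0".
+//
+//	cli.RoleAdd(context.Background(), "app-role")
+//	cli.RoleGrantPermission(context.Background(), "app-role", "app/", "app0",
+//		clientv3.PermissionType(clientv3.PermReadWrite))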
+ Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm" json:"perm,omitempty"` +} + +func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} } +func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} +func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{60} +} + +func (m *AuthRoleGrantPermissionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { + if m != nil { + return m.Perm + } + return nil +} + +type AuthRoleRevokePermissionRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } +func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} +func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{61} +} + +func (m *AuthRoleRevokePermissionRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { + if m != nil { + return m.RangeEnd + } + return "" +} + +type AuthEnableResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } +func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthEnableResponse) ProtoMessage() {} +func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} } + +func (m *AuthEnableResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthDisableResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } +func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthDisableResponse) ProtoMessage() {} +func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } + +func (m *AuthDisableResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthenticateResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // token is an authorized token that can be used in succeeding RPCs + Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` +} + +func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } +func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } +func (*AuthenticateResponse) ProtoMessage() {} +func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } + +func (m *AuthenticateResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthenticateResponse) GetToken() string { 
+ if m != nil { + return m.Token + } + return "" +} + +type AuthUserAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } +func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddResponse) ProtoMessage() {} +func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} } + +func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserGetResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` +} + +func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } +func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetResponse) ProtoMessage() {} +func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } + +func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthUserGetResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +type AuthUserDeleteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } +func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteResponse) ProtoMessage() {} +func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } + +func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserChangePasswordResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} } +func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserChangePasswordResponse) ProtoMessage() {} +func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{68} +} + +func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserGrantRoleResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } +func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleResponse) ProtoMessage() {} +func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } + +func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserRevokeRoleResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } +func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleResponse) ProtoMessage() {} +func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return 
fileDescriptorRpc, []int{70} } + +func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } +func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddResponse) ProtoMessage() {} +func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } + +func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleGetResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm" json:"perm,omitempty"` +} + +func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } +func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetResponse) ProtoMessage() {} +func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } + +func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission { + if m != nil { + return m.Perm + } + return nil +} + +type AuthRoleListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` +} + +func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } +func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListResponse) ProtoMessage() {} +func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} } + +func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthRoleListResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +type AuthUserListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` +} + +func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } +func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserListResponse) ProtoMessage() {} +func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} } + +func (m *AuthUserListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthUserListResponse) GetUsers() []string { + if m != nil { + return m.Users + } + return nil +} + +type AuthRoleDeleteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } +func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleDeleteResponse) ProtoMessage() {} +func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} } + +func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleGrantPermissionResponse struct { + Header 
*ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} } +func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} +func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{76} +} + +func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleRevokePermissionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} } +func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} +func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{77} +} + +func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") + proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") + proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") + proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest") + proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse") + proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest") + proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse") + proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp") + proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp") + proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare") + proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest") + proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse") + proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest") + proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse") + proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest") + proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse") + proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest") + proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse") + proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest") + proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest") + proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest") + proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse") + proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest") + proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse") + proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest") + proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") + proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") + proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") + proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") + proto.RegisterType((*LeaseTimeToLiveResponse)(nil), 
"etcdserverpb.LeaseTimeToLiveResponse") + proto.RegisterType((*Member)(nil), "etcdserverpb.Member") + proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest") + proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse") + proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest") + proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse") + proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest") + proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse") + proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest") + proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") + proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") + proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") + proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest") + proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember") + proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse") + proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse") + proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest") + proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest") + proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest") + proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest") + proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest") + proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest") + proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest") + proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest") + proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest") + proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest") + proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest") + proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest") + proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest") + proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest") + proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest") + proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest") + proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse") + proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse") + proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse") + proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse") + proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse") + proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse") + proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse") + proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse") + proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), 
"etcdserverpb.AuthUserRevokeRoleResponse") + proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse") + proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse") + proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse") + proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse") + proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse") + proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse") + proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse") + proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) + proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) + proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for KV service + +type KVClient interface { + // Range gets the keys in the range from the key-value store. + Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. 
+ Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) +} + +type kVClient struct { + cc *grpc.ClientConn +} + +func NewKVClient(cc *grpc.ClientConn) KVClient { + return &kVClient{cc} +} + +func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) { + out := new(RangeResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { + out := new(PutResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) { + out := new(DeleteRangeResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) { + out := new(TxnResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) { + out := new(CompactionResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for KV service + +type KVServer interface { + // Range gets the keys in the range from the key-value store. + Range(context.Context, *RangeRequest) (*RangeResponse, error) + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + Put(context.Context, *PutRequest) (*PutResponse, error) + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error) + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + Txn(context.Context, *TxnRequest) (*TxnResponse, error) + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. 
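+// Illustrative sketch, not part of the generated file: serving this
+// interface. myKV is a hypothetical KVServer implementation and pb is an
+// assumed import name for this package; RegisterKVServer is defined just
+// below. Error handling elided.
+//
+//	srv := grpc.NewServer()
+//	pb.RegisterKVServer(srv, &myKV{})
+//	lis, _ := net.Listen("tcp", "127.0.0.1:2379")
+//	srv.Serve(lis)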
+ Compact(context.Context, *CompactionRequest) (*CompactionResponse, error) +} + +func RegisterKVServer(s *grpc.Server, srv KVServer) { + s.RegisterService(&_KV_serviceDesc, srv) +} + +func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Range(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Range", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Range(ctx, req.(*RangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Put(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Put", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Put(ctx, req.(*PutRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).DeleteRange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/DeleteRange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TxnRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Txn(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Txn", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Txn(ctx, req.(*TxnRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Compact(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Compact", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Compact(ctx, req.(*CompactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _KV_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.KV", + HandlerType: (*KVServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Range", + Handler: _KV_Range_Handler, + }, + { + MethodName: "Put", + Handler: _KV_Put_Handler, + }, + { + MethodName: "DeleteRange", + Handler: _KV_DeleteRange_Handler, + }, + { + MethodName: "Txn", + Handler: _KV_Txn_Handler, + }, + { + 
MethodName: "Compact", + Handler: _KV_Compact_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +// Client API for Watch service + +type WatchClient interface { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) +} + +type watchClient struct { + cc *grpc.ClientConn +} + +func NewWatchClient(cc *grpc.ClientConn) WatchClient { + return &watchClient{cc} +} + +func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Watch/Watch", opts...) + if err != nil { + return nil, err + } + x := &watchWatchClient{stream} + return x, nil +} + +type Watch_WatchClient interface { + Send(*WatchRequest) error + Recv() (*WatchResponse, error) + grpc.ClientStream +} + +type watchWatchClient struct { + grpc.ClientStream +} + +func (x *watchWatchClient) Send(m *WatchRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *watchWatchClient) Recv() (*WatchResponse, error) { + m := new(WatchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Watch service + +type WatchServer interface { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + Watch(Watch_WatchServer) error +} + +func RegisterWatchServer(s *grpc.Server, srv WatchServer) { + s.RegisterService(&_Watch_serviceDesc, srv) +} + +func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WatchServer).Watch(&watchWatchServer{stream}) +} + +type Watch_WatchServer interface { + Send(*WatchResponse) error + Recv() (*WatchRequest, error) + grpc.ServerStream +} + +type watchWatchServer struct { + grpc.ServerStream +} + +func (x *watchWatchServer) Send(m *WatchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *watchWatchServer) Recv() (*WatchRequest, error) { + m := new(WatchRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Watch_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Watch", + HandlerType: (*WatchServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Watch_Watch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// Client API for Lease service + +type LeaseClient interface { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. 
+ LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) + // LeaseTimeToLive retrieves lease information. + LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) +} + +type leaseClient struct { + cc *grpc.ClientConn +} + +func NewLeaseClient(cc *grpc.ClientConn) LeaseClient { + return &leaseClient{cc} +} + +func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) { + out := new(LeaseGrantResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) { + out := new(LeaseRevokeResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Lease_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Lease/LeaseKeepAlive", opts...) + if err != nil { + return nil, err + } + x := &leaseLeaseKeepAliveClient{stream} + return x, nil +} + +type Lease_LeaseKeepAliveClient interface { + Send(*LeaseKeepAliveRequest) error + Recv() (*LeaseKeepAliveResponse, error) + grpc.ClientStream +} + +type leaseLeaseKeepAliveClient struct { + grpc.ClientStream +} + +func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { + m := new(LeaseKeepAliveResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { + out := new(LeaseTimeToLiveResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Lease service + +type LeaseServer interface { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. + LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error) + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error) + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. 
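+// Illustrative sketch, not part of the generated file: the same keep-alive
+// exchange driven directly through the generated client above. Assumes the
+// package imported as pb, a dialed *grpc.ClientConn named conn, and a lease
+// ID from a prior LeaseGrant.
+//
+//	lc := pb.NewLeaseClient(conn)
+//	stream, err := lc.LeaseKeepAlive(context.Background())
+//	if err == nil {
+//		stream.Send(&pb.LeaseKeepAliveRequest{ID: leaseID})
+//		if resp, rerr := stream.Recv(); rerr == nil {
+//			_ = resp.TTL // renewed time-to-live
+//		}
+//	}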
+ LeaseKeepAlive(Lease_LeaseKeepAliveServer) error + // LeaseTimeToLive retrieves lease information. + LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) +} + +func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { + s.RegisterService(&_Lease_serviceDesc, srv) +} + +func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseGrantRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseGrant(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseGrant", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseRevokeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseRevoke(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseRevoke", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream}) +} + +type Lease_LeaseKeepAliveServer interface { + Send(*LeaseKeepAliveResponse) error + Recv() (*LeaseKeepAliveRequest, error) + grpc.ServerStream +} + +type leaseLeaseKeepAliveServer struct { + grpc.ServerStream +} + +func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) { + m := new(LeaseKeepAliveRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseTimeToLiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseTimeToLive(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Lease_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Lease", + HandlerType: (*LeaseServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LeaseGrant", + Handler: _Lease_LeaseGrant_Handler, + }, + { + MethodName: "LeaseRevoke", + Handler: _Lease_LeaseRevoke_Handler, + }, + { + MethodName: "LeaseTimeToLive", + Handler: _Lease_LeaseTimeToLive_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "LeaseKeepAlive", + Handler: _Lease_LeaseKeepAlive_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// Client API for Cluster 
service + +type ClusterClient interface { + // MemberAdd adds a member into the cluster. + MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) + // MemberRemove removes an existing member from the cluster. + MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) + // MemberUpdate updates the member configuration. + MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) + // MemberList lists all the members in the cluster. + MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) +} + +type clusterClient struct { + cc *grpc.ClientConn +} + +func NewClusterClient(cc *grpc.ClientConn) ClusterClient { + return &clusterClient{cc} +} + +func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) { + out := new(MemberAddResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) { + out := new(MemberRemoveResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) { + out := new(MemberUpdateResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) { + out := new(MemberListResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Cluster service + +type ClusterServer interface { + // MemberAdd adds a member into the cluster. + MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error) + // MemberRemove removes an existing member from the cluster. + MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error) + // MemberUpdate updates the member configuration. + MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error) + // MemberList lists all the members in the cluster. 
+ MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error) +} + +func RegisterClusterServer(s *grpc.Server, srv ClusterServer) { + s.RegisterService(&_Cluster_serviceDesc, srv) +} + +func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberRemoveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberRemove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberRemove", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberUpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberUpdate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberUpdate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Cluster_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Cluster", + HandlerType: (*ClusterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "MemberAdd", + Handler: _Cluster_MemberAdd_Handler, + }, + { + MethodName: "MemberRemove", + Handler: _Cluster_MemberRemove_Handler, + }, + { + MethodName: "MemberUpdate", + Handler: _Cluster_MemberUpdate_Handler, + }, + { + MethodName: "MemberList", + Handler: _Cluster_MemberList_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +// Client API for Maintenance service + +type MaintenanceClient interface { + // Alarm activates, deactivates, and queries alarms regarding cluster health. 
+ Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) + // Status gets the status of the member. + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + // Defragment defragments a member's backend database to recover storage space. + Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) + // Hash returns the hash of the local KV state for consistency checking purposes. + // This is designed for testing; do not use this in production when there + // are ongoing transactions. + Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) +} + +type maintenanceClient struct { + cc *grpc.ClientConn +} + +func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient { + return &maintenanceClient{cc} +} + +func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) { + out := new(AlarmResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) { + out := new(DefragmentResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) { + out := new(HashResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Maintenance_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Maintenance/Snapshot", opts...) + if err != nil { + return nil, err + } + x := &maintenanceSnapshotClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Maintenance_SnapshotClient interface { + Recv() (*SnapshotResponse, error) + grpc.ClientStream +} + +type maintenanceSnapshotClient struct { + grpc.ClientStream +} + +func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { + m := new(SnapshotResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Maintenance service + +type MaintenanceServer interface { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) + // Status gets the status of the member.
+ Status(context.Context, *StatusRequest) (*StatusResponse, error) + // Defragment defragments a member's backend database to recover storage space. + Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) + // Hash returns the hash of the local KV state for consistency checking purposes. + // This is designed for testing; do not use this in production when there + // are ongoing transactions. + Hash(context.Context, *HashRequest) (*HashResponse, error) + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error +} + +func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { + s.RegisterService(&_Maintenance_serviceDesc, srv) +} + +func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AlarmRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Alarm(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Alarm", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DefragmentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Defragment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Defragment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Hash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Hash", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SnapshotRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream}) +} +
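[Editor's note, not part of the vendored diff: the Snapshot RPC above is server-streaming. The generated MaintenanceClient sends the single SnapshotRequest, half-closes the send side, and the caller then drains SnapshotResponse chunks until the stream ends. A minimal consumer sketch under stated assumptions follows; the endpoint address and output file name are hypothetical, and the import path matches the vendored github.com/coreos/etcd package.]

```go
// Hypothetical usage sketch, not part of the vendored file: stream a
// member's backend snapshot to a local file via the generated client.
package main

import (
	"io"
	"log"
	"os"

	"golang.org/x/net/context"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"google.golang.org/grpc"
)

func main() {
	// Assumed endpoint for illustration only.
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	mc := pb.NewMaintenanceClient(conn)
	stream, err := mc.Snapshot(context.Background(), &pb.SnapshotRequest{})
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Create("backend.snapshot") // hypothetical output path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Drain the stream; Recv returns io.EOF after the final chunk.
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if _, err := f.Write(resp.Blob); err != nil {
			log.Fatal(err)
		}
	}
}
```

[Each SnapshotResponse carries a Blob chunk plus a RemainingBytes count, so a caller can also report progress while writing.]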
+type Maintenance_SnapshotServer interface { + Send(*SnapshotResponse) error + grpc.ServerStream +} + +type maintenanceSnapshotServer struct { + grpc.ServerStream +} + +func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Maintenance_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Maintenance", + HandlerType: (*MaintenanceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Alarm", + Handler: _Maintenance_Alarm_Handler, + }, + { + MethodName: "Status", + Handler: _Maintenance_Status_Handler, + }, + { + MethodName: "Defragment", + Handler: _Maintenance_Defragment_Handler, + }, + { + MethodName: "Hash", + Handler: _Maintenance_Hash_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Snapshot", + Handler: _Maintenance_Snapshot_Handler, + ServerStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// Client API for Auth service + +type AuthClient interface { + // AuthEnable enables authentication. + AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) + // AuthDisable disables authentication. + AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) + // Authenticate processes an authenticate request. + Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) + // UserAdd adds a new user. + UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) + // UserGet gets detailed user information. + UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) + // UserList gets a list of all users. + UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) + // UserDelete deletes a specified user. + UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) + // UserChangePassword changes the password of a specified user. + UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) + // UserGrantRole grants a role to a specified user. + UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) + // UserRevokeRole revokes a role of a specified user. + UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) + // RoleAdd adds a new role. + RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) + // RoleGet gets detailed role information. + RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) + // RoleList gets a list of all roles. + RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) + // RoleDelete deletes a specified role. + RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) + // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+ RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) + // RoleRevokePermission revokes a key or range permission of a specified role. + RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) +} + +type authClient struct { + cc *grpc.ClientConn +} + +func NewAuthClient(cc *grpc.ClientConn) AuthClient { + return &authClient{cc} +} + +func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { + out := new(AuthEnableResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { + out := new(AuthDisableResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { + out := new(AuthenticateResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { + out := new(AuthUserAddResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { + out := new(AuthUserGetResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { + out := new(AuthUserListResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { + out := new(AuthUserDeleteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { + out := new(AuthUserChangePasswordResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { + out := new(AuthUserGrantRoleResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { + out := new(AuthUserRevokeRoleResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { + out := new(AuthRoleAddResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { + out := new(AuthRoleGetResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { + out := new(AuthRoleListResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { + out := new(AuthRoleDeleteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { + out := new(AuthRoleGrantPermissionResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { + out := new(AuthRoleRevokePermissionResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Auth service + +type AuthServer interface { + // AuthEnable enables authentication. + AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) + // AuthDisable disables authentication. + AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) + // Authenticate processes an authenticate request. + Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) + // UserAdd adds a new user. + UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error) + // UserGet gets detailed user information. + UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error) + // UserList gets a list of all users. + UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error) + // UserDelete deletes a specified user. + UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) + // UserChangePassword changes the password of a specified user. 
+ UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) + // UserGrantRole grants a role to a specified user. + UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) + // UserRevokeRole revokes a role of a specified user. + UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) + // RoleAdd adds a new role. + RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error) + // RoleGet gets detailed role information. + RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error) + // RoleList gets a list of all roles. + RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error) + // RoleDelete deletes a specified role. + RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) + // RoleRevokePermission revokes a key or range permission of a specified role. + RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) +} + +func RegisterAuthServer(s *grpc.Server, srv AuthServer) { + s.RegisterService(&_Auth_serviceDesc, srv) +} + +func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthEnableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthEnable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthEnable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthDisableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthDisable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthDisable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthenticateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).Authenticate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/Authenticate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if
interceptor == nil { + return srv.(AuthServer).UserAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserChangePasswordRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserChangePassword(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserChangePassword", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGrantRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGrantRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGrantRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest)) + } + return interceptor(ctx, in, info, 
handler) +} + +func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserRevokeRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserRevokeRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserRevokeRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGrantPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGrantPermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: 
srv, + FullMethod: "/etcdserverpb.Auth/RoleGrantPermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleRevokePermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleRevokePermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleRevokePermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Auth_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Auth", + HandlerType: (*AuthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AuthEnable", + Handler: _Auth_AuthEnable_Handler, + }, + { + MethodName: "AuthDisable", + Handler: _Auth_AuthDisable_Handler, + }, + { + MethodName: "Authenticate", + Handler: _Auth_Authenticate_Handler, + }, + { + MethodName: "UserAdd", + Handler: _Auth_UserAdd_Handler, + }, + { + MethodName: "UserGet", + Handler: _Auth_UserGet_Handler, + }, + { + MethodName: "UserList", + Handler: _Auth_UserList_Handler, + }, + { + MethodName: "UserDelete", + Handler: _Auth_UserDelete_Handler, + }, + { + MethodName: "UserChangePassword", + Handler: _Auth_UserChangePassword_Handler, + }, + { + MethodName: "UserGrantRole", + Handler: _Auth_UserGrantRole_Handler, + }, + { + MethodName: "UserRevokeRole", + Handler: _Auth_UserRevokeRole_Handler, + }, + { + MethodName: "RoleAdd", + Handler: _Auth_RoleAdd_Handler, + }, + { + MethodName: "RoleGet", + Handler: _Auth_RoleGet_Handler, + }, + { + MethodName: "RoleList", + Handler: _Auth_RoleList_Handler, + }, + { + MethodName: "RoleDelete", + Handler: _Auth_RoleDelete_Handler, + }, + { + MethodName: "RoleGrantPermission", + Handler: _Auth_RoleGrantPermission_Handler, + }, + { + MethodName: "RoleRevokePermission", + Handler: _Auth_RoleRevokePermission_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ClusterId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) + } + if m.MemberId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) + } + if m.Revision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.RaftTerm != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) + } + return i, nil +} + +func (m *RangeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, 
uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.Limit != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + } + if m.Revision != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.SortOrder != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) + } + if m.SortTarget != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) + } + if m.Serializable { + dAtA[i] = 0x38 + i++ + if m.Serializable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.KeysOnly { + dAtA[i] = 0x40 + i++ + if m.KeysOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.CountOnly { + dAtA[i] = 0x48 + i++ + if m.CountOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.MinModRevision != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) + } + if m.MaxModRevision != 0 { + dAtA[i] = 0x58 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) + } + if m.MinCreateRevision != 0 { + dAtA[i] = 0x60 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) + } + if m.MaxCreateRevision != 0 { + dAtA[i] = 0x68 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) + } + return i, nil +} + +func (m *RangeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Kvs) > 0 { + for _, msg := range m.Kvs { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.More { + dAtA[i] = 0x18 + i++ + if m.More { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Count != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Count)) + } + return i, nil +} + +func (m *PutRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.Lease != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + } + if m.PrevKv { + dAtA[i] = 0x20 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IgnoreValue { + dAtA[i] = 0x28 + i++ + if m.IgnoreValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IgnoreLease { + dAtA[i] = 0x30 + i++ + if m.IgnoreLease { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PutResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil 
{ + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n2, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.PrevKv != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.PrevKv.Size())) + n3, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.PrevKv { + dAtA[i] = 0x18 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n4, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Deleted != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) + } + if len(m.PrevKvs) > 0 { + for _, msg := range m.PrevKvs { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RequestOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Request != nil { + nn5, err := m.Request.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestRange != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestRange.Size())) + n6, err := m.RequestRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestPut != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestPut.Size())) + n7, err := m.RequestPut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestDeleteRange != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestDeleteRange.Size())) + n8, err := m.RequestDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func 
(m *ResponseOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Response != nil { + nn9, err := m.Response.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn9 + } + return i, nil +} + +func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseRange != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size())) + n10, err := m.ResponseRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} +func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponsePut != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size())) + n11, err := m.ResponsePut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} +func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseDeleteRange != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size())) + n12, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *Compare) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Compare) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Result != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Result)) + } + if m.Target != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Target)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.TargetUnion != nil { + nn13, err := m.TargetUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn13 + } + return i, nil +} + +func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Version)) + return i, nil +} +func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) + return i, nil +} +func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) + return i, nil +} +func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Value != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} +func (m *TxnRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Compare) > 0 { + for _, msg := range m.Compare { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Success) > 0 { + for _, msg := range m.Success { + dAtA[i] = 0x12 + i++ + i = 
encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Failure) > 0 { + for _, msg := range m.Failure { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TxnResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n14, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.Succeeded { + dAtA[i] = 0x10 + i++ + if m.Succeeded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Responses) > 0 { + for _, msg := range m.Responses { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Revision != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.Physical { + dAtA[i] = 0x10 + i++ + if m.Physical { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n15, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} + +func (m *HashRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *HashResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n16, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.Hash != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) + } + return i, nil +} + +func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n17, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.RemainingBytes != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) + } + if len(m.Blob) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) + i += copy(dAtA[i:], m.Blob) + } + return i, nil +} + +func (m *WatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RequestUnion != nil { + nn18, err := m.RequestUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn18 + } + return i, nil +} + +func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CreateRequest != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size())) + n19, err := m.CreateRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} +func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CancelRequest != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CancelRequest.Size())) + n20, err := m.CancelRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + return i, nil +} +func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.StartRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) + } + if m.ProgressNotify { + dAtA[i] = 0x20 + i++ + if m.ProgressNotify { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Filters) > 0 { + dAtA22 := make([]byte, len(m.Filters)*10) + var j21 int + for _, num := range m.Filters { + for num >= 1<<7 { + dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j21++ + } + dAtA22[j21] = uint8(num) + j21++ + } + dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(j21)) + i += copy(dAtA[i:], dAtA22[:j21]) + } + if m.PrevKv { + dAtA[i] = 0x30 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) { + var i 
int + _ = i + var l int + _ = l + if m.WatchId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + return i, nil +} + +func (m *WatchResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n23, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.WatchId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + if m.Created { + dAtA[i] = 0x18 + i++ + if m.Created { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Canceled { + dAtA[i] = 0x20 + i++ + if m.Canceled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.CompactRevision != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) + } + if len(m.CancelReason) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) + i += copy(dAtA[i:], m.CancelReason) + } + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x5a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TTL != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n24, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if len(m.Error) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + return i, nil +} + +func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseRevokeResponse) MarshalTo(dAtA 
[]byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n25, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} + +func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n26, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + return i, nil +} + +func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.Keys { + dAtA[i] = 0x10 + i++ + if m.Keys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n27, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if m.GrantedTTL != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) + } + } + return i, nil +} + +func (m *Member) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Member) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + 
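// The hard-coded lead bytes throughout these writers are protobuf field keys:
// key = fieldNumber<<3 | wireType (wire type 0 = varint, 2 = length-delimited).
// So 0x1a just below is Member field 3 (peerURLs) as length-delimited data, and
// 0x22 is field 4 (clientURLs). A minimal sketch of the rule -- fieldKey is an
// illustrative helper, not part of this generated file:
//
//	func fieldKey(fieldNum, wireType int) byte { return byte(fieldNum<<3 | wireType) }
//
//	// fieldKey(3, 2) == 0x1a; fieldKey(4, 2) == 0x22; fieldKey(1, 0) == 0x08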
dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ClientURLs) > 0 { + for _, s := range m.ClientURLs { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n28, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.Member != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size())) + n29, err := m.Member.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n30, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, 
uint64(m.ID)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n31, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n32, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n33, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + return i, nil +} + +func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Action)) + } + if m.MemberID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) + } + if m.Alarm != 0 { + dAtA[i] 
= 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) + } + return i, nil +} + +func (m *AlarmMember) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MemberID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) + } + if m.Alarm != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) + } + return i, nil +} + +func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n34, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if len(m.Alarms) > 0 { + for _, msg := range m.Alarms { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n35, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + if len(m.Version) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + } + if m.DbSize != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) + } + if m.Leader != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) + } + if m.RaftIndex != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) + } + if m.RaftTerm != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) + } + return i, nil +} + +func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) 
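// Every message here follows the same three-method gogo/protobuf contract:
// Size() computes the exact encoded length, Marshal() allocates one buffer of
// that size (as in the surrounding lines), and MarshalTo() writes into a
// caller-owned buffer and returns the number of bytes written. A hedged usage
// sketch -- the variable names are illustrative:
//
//	req := &AuthenticateRequest{Name: "root", Password: "secret"}
//	buf := make([]byte, req.Size())
//	if _, err := req.MarshalTo(buf); err != nil {
//		// handle marshal failure
//	}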
+ n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + return i, nil +} + +func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + return i, nil +} + +func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + return i, nil +} + +func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.User) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.Role) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Role) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Perm != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Perm.Size())) + n36, err := m.Perm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + return i, nil +} + +func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Role) > 0 { 
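// proto3 semantics: a field is written only when it differs from its zero
// value, which is why every write in these methods sits behind a guard like
// the one above. An all-default message therefore encodes to zero bytes --
// e.g. (&AuthRoleRevokePermissionRequest{}).Size() == 0 -- and decoders treat
// absent fields as zero values.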
+ dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + return i, nil +} + +func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n37, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} + +func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n38, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + return i, nil +} + +func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n39, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if len(m.Token) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + } + return i, nil +} + +func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n40, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + return i, nil +} + +func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n41, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n42, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + return i, nil +} + +func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n43, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + return i, nil +} + +func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n44, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + return i, nil +} + +func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n45, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + return i, nil +} + +func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n46, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + return i, nil +} + +func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n47, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } + if len(m.Perm) > 0 { + for _, msg := range m.Perm { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n48, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n49, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + if len(m.Users) > 0 { + for _, s := range m.Users { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n50, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + return i, nil +} + +func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n51, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + } + return i, nil +} + +func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n52, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + } + return i, nil +} + +func encodeFixed64Rpc(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Rpc(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] 
= uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ResponseHeader) Size() (n int) { + var l int + _ = l + if m.ClusterId != 0 { + n += 1 + sovRpc(uint64(m.ClusterId)) + } + if m.MemberId != 0 { + n += 1 + sovRpc(uint64(m.MemberId)) + } + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.RaftTerm != 0 { + n += 1 + sovRpc(uint64(m.RaftTerm)) + } + return n +} + +func (m *RangeRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.SortOrder != 0 { + n += 1 + sovRpc(uint64(m.SortOrder)) + } + if m.SortTarget != 0 { + n += 1 + sovRpc(uint64(m.SortTarget)) + } + if m.Serializable { + n += 2 + } + if m.KeysOnly { + n += 2 + } + if m.CountOnly { + n += 2 + } + if m.MinModRevision != 0 { + n += 1 + sovRpc(uint64(m.MinModRevision)) + } + if m.MaxModRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxModRevision)) + } + if m.MinCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MinCreateRevision)) + } + if m.MaxCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxCreateRevision)) + } + return n +} + +func (m *RangeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Kvs) > 0 { + for _, e := range m.Kvs { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.More { + n += 2 + } + if m.Count != 0 { + n += 1 + sovRpc(uint64(m.Count)) + } + return n +} + +func (m *PutRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovRpc(uint64(m.Lease)) + } + if m.PrevKv { + n += 2 + } + if m.IgnoreValue { + n += 2 + } + if m.IgnoreLease { + n += 2 + } + return n +} + +func (m *PutResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *DeleteRangeRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.PrevKv { + n += 2 + } + return n +} + +func (m *DeleteRangeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Deleted != 0 { + n += 1 + sovRpc(uint64(m.Deleted)) + } + if len(m.PrevKvs) > 0 { + for _, e := range m.PrevKvs { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *RequestOp) Size() (n int) { + var l int + _ = l + if m.Request != nil { + n += m.Request.Size() + } + return n +} + +func (m *RequestOp_RequestRange) Size() (n int) { + var l int + _ = l + if m.RequestRange != nil { + l = m.RequestRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestPut) Size() (n int) { + var l int + _ = l + if m.RequestPut != nil { + l = m.RequestPut.Size() + n += 1 + l + sovRpc(uint64(l)) + } + 
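// The Size() arithmetic mirrors the writers: the leading "1 +" is the one-byte
// field key (valid for field numbers 1..15), sovRpc(uint64(l)) is the varint
// length prefix, l is the payload, and a set bool costs a flat 2 bytes (key
// plus 0x00/0x01). sovRpc itself counts varint bytes; an equivalent one-liner
// using math/bits (an illustrative alternative, not what the generator emits):
//
//	func varintLen(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }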
return n +} +func (m *RequestOp_RequestDeleteRange) Size() (n int) { + var l int + _ = l + if m.RequestDeleteRange != nil { + l = m.RequestDeleteRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp) Size() (n int) { + var l int + _ = l + if m.Response != nil { + n += m.Response.Size() + } + return n +} + +func (m *ResponseOp_ResponseRange) Size() (n int) { + var l int + _ = l + if m.ResponseRange != nil { + l = m.ResponseRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponsePut) Size() (n int) { + var l int + _ = l + if m.ResponsePut != nil { + l = m.ResponsePut.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { + var l int + _ = l + if m.ResponseDeleteRange != nil { + l = m.ResponseDeleteRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *Compare) Size() (n int) { + var l int + _ = l + if m.Result != 0 { + n += 1 + sovRpc(uint64(m.Result)) + } + if m.Target != 0 { + n += 1 + sovRpc(uint64(m.Target)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.TargetUnion != nil { + n += m.TargetUnion.Size() + } + return n +} + +func (m *Compare_Version) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.Version)) + return n +} +func (m *Compare_CreateRevision) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.CreateRevision)) + return n +} +func (m *Compare_ModRevision) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.ModRevision)) + return n +} +func (m *Compare_Value) Size() (n int) { + var l int + _ = l + if m.Value != nil { + l = len(m.Value) + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *TxnRequest) Size() (n int) { + var l int + _ = l + if len(m.Compare) > 0 { + for _, e := range m.Compare { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Success) > 0 { + for _, e := range m.Success { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Failure) > 0 { + for _, e := range m.Failure { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *TxnResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Succeeded { + n += 2 + } + if len(m.Responses) > 0 { + for _, e := range m.Responses { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *CompactionRequest) Size() (n int) { + var l int + _ = l + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.Physical { + n += 2 + } + return n +} + +func (m *CompactionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *HashRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *HashResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Hash != 0 { + n += 1 + sovRpc(uint64(m.Hash)) + } + return n +} + +func (m *SnapshotRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *SnapshotResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.RemainingBytes != 0 { + n += 1 + sovRpc(uint64(m.RemainingBytes)) + } + l = len(m.Blob) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *WatchRequest) Size() (n int) { + var l int + _ = l + if 
m.RequestUnion != nil { + n += m.RequestUnion.Size() + } + return n +} + +func (m *WatchRequest_CreateRequest) Size() (n int) { + var l int + _ = l + if m.CreateRequest != nil { + l = m.CreateRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchRequest_CancelRequest) Size() (n int) { + var l int + _ = l + if m.CancelRequest != nil { + l = m.CancelRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchCreateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.StartRevision != 0 { + n += 1 + sovRpc(uint64(m.StartRevision)) + } + if m.ProgressNotify { + n += 2 + } + if len(m.Filters) > 0 { + l = 0 + for _, e := range m.Filters { + l += sovRpc(uint64(e)) + } + n += 1 + sovRpc(uint64(l)) + l + } + if m.PrevKv { + n += 2 + } + return n +} + +func (m *WatchCancelRequest) Size() (n int) { + var l int + _ = l + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + return n +} + +func (m *WatchResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + if m.Created { + n += 2 + } + if m.Canceled { + n += 2 + } + if m.CompactRevision != 0 { + n += 1 + sovRpc(uint64(m.CompactRevision)) + } + l = len(m.CancelReason) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *LeaseGrantRequest) Size() (n int) { + var l int + _ = l + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseGrantResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *LeaseRevokeRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseRevokeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *LeaseKeepAliveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseKeepAliveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + return n +} + +func (m *LeaseTimeToLiveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.Keys { + n += 2 + } + return n +} + +func (m *LeaseTimeToLiveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.GrantedTTL != 0 { + n += 1 + sovRpc(uint64(m.GrantedTTL)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + l = len(b) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *Member) Size() (n int) { + var l int + 
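// One wrinkle above: WatchCreateRequest.Filters is a packed repeated field, so
// proto3 emits a single key 0x2a (field 5, wire type 2), one varint byte
// count, and then the concatenated varint values -- not a key per element. Its
// size is therefore 1 + sovRpc(uint64(payload)) + payload, where payload sums
// sovRpc over the elements, matching the staging loop in the
// WatchCreateRequest.MarshalTo writer earlier in this file.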
_ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.ClientURLs) > 0 { + for _, s := range m.ClientURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberAddRequest) Size() (n int) { + var l int + _ = l + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Member != nil { + l = m.Member.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberRemoveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *MemberRemoveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberUpdateRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberUpdateResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *MemberListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *DefragmentRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *DefragmentResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AlarmRequest) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovRpc(uint64(m.Action)) + } + if m.MemberID != 0 { + n += 1 + sovRpc(uint64(m.MemberID)) + } + if m.Alarm != 0 { + n += 1 + sovRpc(uint64(m.Alarm)) + } + return n +} + +func (m *AlarmMember) Size() (n int) { + var l int + _ = l + if m.MemberID != 0 { + n += 1 + sovRpc(uint64(m.MemberID)) + } + if m.Alarm != 0 { + n += 1 + sovRpc(uint64(m.Alarm)) + } + return n +} + +func (m *AlarmResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Alarms) > 0 { + for _, e := range m.Alarms { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *StatusRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *StatusResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.DbSize != 0 { + n += 1 + 
sovRpc(uint64(m.DbSize)) + } + if m.Leader != 0 { + n += 1 + sovRpc(uint64(m.Leader)) + } + if m.RaftIndex != 0 { + n += 1 + sovRpc(uint64(m.RaftIndex)) + } + if m.RaftTerm != 0 { + n += 1 + sovRpc(uint64(m.RaftTerm)) + } + return n +} + +func (m *AuthEnableRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthDisableRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthenticateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserAddRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGetRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserDeleteRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserChangePasswordRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGrantRoleRequest) Size() (n int) { + var l int + _ = l + l = len(m.User) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserRevokeRoleRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleAddRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGetRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthRoleListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthRoleDeleteRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGrantPermissionRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Perm != nil { + l = m.Perm.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleRevokePermissionRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthEnableResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthDisableResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthenticateResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Token) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) 
+ } + return n +} + +func (m *AuthUserAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGetResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthUserDeleteResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserChangePasswordResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGrantRoleResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserRevokeRoleResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGetResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Perm) > 0 { + for _, e := range m.Perm { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthRoleListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthUserListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthRoleDeleteResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGrantPermissionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleRevokePermissionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func sovRpc(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRpc(x uint64) (n int) { + return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResponseHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + 
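// Decoding is the mirror image: each iteration reads one key varint, splits it
// into fieldNum (wire >> 3) and wireType (wire & 0x7), rejects wire type 4
// (group end), and sends unknown fields through skipRpc in the default case.
// The varint read itself accumulates 7 bits per byte while the continuation
// bit (0x80) is set, failing after 10 bytes. A standalone sketch of that loop
// (readVarint is an illustrative helper, not part of this file):
//
//	func readVarint(b []byte) (v uint64, n int, ok bool) {
//		for shift := uint(0); shift < 64; shift += 7 {
//			if n >= len(b) {
//				return 0, 0, false // truncated input
//			}
//			c := b[n]
//			n++
//			v |= uint64(c&0x7F) << shift
//			if c < 0x80 {
//				return v, n, true // high bit clear ends the varint
//			}
//		}
//		return 0, 0, false // more than 10 continuation bytes: overflow
//	}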
case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + m.ClusterId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType) + } + m.MemberId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
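// For length-delimited fields (wire type 2) the decoder reads a byte length,
// bounds-checks postIndex against the buffer, then copies with
// append(m.Key[:0], ...) so the decoded message owns its memory instead of
// aliasing dAtA; the nil check just below normalizes a zero-length value to an
// allocated empty slice.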
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType) + } + m.SortOrder = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SortOrder |= (RangeRequest_SortOrder(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType) + } + m.SortTarget = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SortTarget |= (RangeRequest_SortTarget(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Serializable = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.KeysOnly = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CountOnly = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType) + } + m.MinModRevision 
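// Editor's note: Key and RangeEnd above are wire type 2 (length-delimited)
// fields: a varint byte count followed by that many raw bytes. The
// generated code bounds-checks the count before slicing and copies with
// append(field[:0], ...) so an existing backing array is reused. A minimal
// sketch under the same assumptions (stdlib binary.Uvarint reads the same
// base-128 varints; names mine):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

func readLengthDelimited(data []byte) ([]byte, int, error) {
	n, used := binary.Uvarint(data)
	if used <= 0 {
		return nil, 0, errors.New("bad length varint")
	}
	end := used + int(n)
	if int(n) < 0 || end > len(data) { // the ErrInvalidLengthRpc / EOF checks above
		return nil, 0, errors.New("length out of range")
	}
	// Copy rather than alias the input, like append(m.Key[:0], ...) does.
	return append([]byte(nil), data[used:end]...), end, nil
}

func main() {
	p, n, _ := readLengthDelimited([]byte{0x03, 'k', 'e', 'y'}) // length 3, then "key"
	fmt.Printf("%q %d\n", p, n)                                 // "key" 4
}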
= 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType) + } + m.MaxModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType) + } + m.MinCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinCreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType) + } + m.MaxCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxCreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kvs = append(m.Kvs, &mvccpb.KeyValue{}) + if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field More", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.More = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
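// Editor's note: repeated message fields like Kvs above follow a fixed
// pattern: read a varint length, append a fresh element, and hand the
// bounded sub-slice to that element's Unmarshal; each occurrence of the
// field on the wire appends one more entry. Shape of that pattern with a
// stand-in element type (kv is mine, not mvccpb.KeyValue):

package main

import (
	"encoding/binary"
	"fmt"
)

type kv struct{ raw []byte } // stand-in for mvccpb.KeyValue

func (k *kv) Unmarshal(b []byte) error { k.raw = append([]byte(nil), b...); return nil }

// appendDecoded mirrors: m.Kvs = append(m.Kvs, &mvccpb.KeyValue{}) followed
// by m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]).
func appendDecoded(list []*kv, wire []byte) ([]*kv, int, error) {
	n, used := binary.Uvarint(wire)
	end := used + int(n)
	if used <= 0 || end > len(wire) {
		return list, 0, fmt.Errorf("bad embedded-message length")
	}
	list = append(list, &kv{})
	if err := list[len(list)-1].Unmarshal(wire[used:end]); err != nil {
		return list, 0, err
	}
	return list, end, nil
}

func main() {
	list, n, _ := appendDecoded(nil, []byte{0x02, 0xAA, 0xBB})
	fmt.Println(len(list), n) // 1 3
}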
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreValue = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreLease = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &mvccpb.KeyValue{} + } + if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) + } + m.Deleted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Deleted |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{}) + if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestOp) Unmarshal(dAtA []byte) error { + l := 
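// Editor's note: every decoder above funnels unrecognized field numbers
// through skipRpc, whose body lies outside this hunk. Judging from the
// call sites, it returns how many bytes the unknown field occupies so the
// loop can step over it and stay forward-compatible with newer schemas.
// A sketch of such a skipper for the two wire types this file actually
// emits (varint and length-delimited); the shape is my assumption:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

func skipField(data []byte, wireType int) (int, error) {
	switch wireType {
	case 0: // varint: consume bytes until the continuation bit clears
		for i, b := range data {
			if b < 0x80 {
				return i + 1, nil
			}
		}
		return 0, errors.New("unexpected EOF in varint")
	case 2: // length-delimited: length varint plus payload
		n, used := binary.Uvarint(data)
		if used <= 0 || used+int(n) > len(data) {
			return 0, errors.New("bad length")
		}
		return used + int(n), nil
	default:
		return 0, fmt.Errorf("unsupported wire type %d", wireType)
	}
}

func main() {
	n, _ := skipField([]byte{0x96, 0x01, 0xFF}, 0) // two-byte varint, trailing junk
	fmt.Println(n)                                 // 2
}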
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RangeRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestRange{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PutRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestPut{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DeleteRangeRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestDeleteRange{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseOp: illegal tag 
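// Editor's note: RequestOp.Request above is a protobuf oneof. gogoproto
// models it as an interface-typed field plus one wrapper struct per
// variant; decoding a variant unmarshals the payload into a fresh value
// and stores its wrapper, so setting one arm implicitly clears the rest.
// Shape of the pattern with stand-in types (not the etcd definitions):

package main

import "fmt"

type isRequest interface{ isRequest() }

type requestRange struct{ Key []byte }
type requestPut struct{ Key, Value []byte }

func (*requestRange) isRequest() {}
func (*requestPut) isRequest()   {}

type requestOp struct{ Request isRequest }

func main() {
	op := requestOp{Request: &requestPut{Key: []byte("k"), Value: []byte("v")}}
	// Consumers type-switch on the interface, mirroring the generated getters.
	switch r := op.Request.(type) {
	case *requestRange:
		fmt.Println("range", r.Key)
	case *requestPut:
		fmt.Printf("put %s=%s\n", r.Key, r.Value) // put k=v
	}
}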
%d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RangeResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseRange{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PutResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponsePut{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DeleteRangeResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseDeleteRange{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Compare) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Compare: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= (Compare_CompareResult(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", 
wireType) + } + m.Target = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Target |= (Compare_CompareTarget(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_Version{v} + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_CreateRevision{v} + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_ModRevision{v} + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.TargetUnion = &Compare_Value{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxnRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
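// Editor's note: enum fields like Result and Target above ride the same
// varint loop, but the generated code accumulates directly in the enum
// type — m.Result |= (Compare_CompareResult(b) & 0x7F) << shift — so no
// separate cast step is needed afterwards. Sketch with a stand-in enum:

package main

import "fmt"

type compareResult int32 // stand-in for Compare_CompareResult

const (
	equal   compareResult = 0
	greater compareResult = 1
	less    compareResult = 2
)

// decodeEnum accumulates the varint straight into the enum type.
func decodeEnum(data []byte) (compareResult, int) {
	var v compareResult
	for i, shift := 0, uint(0); i < len(data); i, shift = i+1, shift+7 {
		b := data[i]
		v |= compareResult(b&0x7F) << shift
		if b < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0
}

func main() {
	v, n := decodeEnum([]byte{0x02})
	fmt.Println(v == less, n) // true 1
}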
fmt.Errorf("proto: TxnRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Compare = append(m.Compare, &Compare{}) + if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Success = append(m.Success, &RequestOp{}) + if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Failure = append(m.Failure, &RequestOp{}) + if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxnResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Succeeded = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Responses = append(m.Responses, &ResponseOp{}) + if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Physical = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 
+ for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType) + } + m.RemainingBytes = 0 + for shift := uint(0); ; 
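// Editor's note: HashRequest and SnapshotRequest above carry no fields at
// all, yet their Unmarshal still runs the full key-parsing loop with only
// the default branch, skipping whatever it finds; old binaries thus
// tolerate fields a newer .proto revision might add. A minimal runnable
// demonstration of that tolerance (names and skipping logic mine):

package main

import (
	"encoding/binary"
	"fmt"
)

// unmarshalEmpty mimics HashRequest.Unmarshal: parse keys, skip every
// field, and fail only on malformed input.
func unmarshalEmpty(data []byte) error {
	i := 0
	for i < len(data) {
		key, used := binary.Uvarint(data[i:])
		if used <= 0 {
			return fmt.Errorf("bad key at %d", i)
		}
		i += used
		switch key & 7 { // wire type lives in the low 3 bits
		case 0: // varint
			_, n := binary.Uvarint(data[i:])
			if n <= 0 {
				return fmt.Errorf("bad varint")
			}
			i += n
		case 2: // length-delimited
			l, n := binary.Uvarint(data[i:])
			if n <= 0 || i+n+int(l) > len(data) {
				return fmt.Errorf("bad length")
			}
			i += n + int(l)
		default:
			return fmt.Errorf("unsupported wire type %d", key&7)
		}
	}
	return nil
}

func main() {
	// Key 0x08 = field 1, wire type 0, value 7 — unknown, silently skipped.
	fmt.Println(unmarshalEmpty([]byte{0x08, 0x07})) // <nil>
}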
shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RemainingBytes |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...) + if m.Blob == nil { + m.Blob = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchCreateRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_CreateRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchCancelRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_CancelRequest{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchCreateRequest) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType) + } + m.StartRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressNotify = bool(v != 0) + case 5: + if wireType == 0 { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return 
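// Editor's note: Filters above is a repeated enum that may arrive packed
// (wire type 2: one length prefix, then back-to-back varints) or unpacked
// (wire type 0: one tagged varint per element); the generated code
// accepts both, as the protobuf spec requires of parsers. Sketch of the
// packed path (filterType stands in for WatchCreateRequest_FilterType):

package main

import (
	"encoding/binary"
	"fmt"
)

type filterType int32

func decodePacked(data []byte) ([]filterType, error) {
	l, used := binary.Uvarint(data)
	if used <= 0 || used+int(l) > len(data) {
		return nil, fmt.Errorf("bad packed length")
	}
	var out []filterType
	for i, end := used, used+int(l); i < end; {
		v, n := binary.Uvarint(data[i:])
		if n <= 0 || i+n > end {
			return nil, fmt.Errorf("element overruns packed run")
		}
		out = append(out, filterType(v))
		i += n
	}
	return out, nil
}

func main() {
	// Length 2, then elements 0 and 1 decoded from a single packed run.
	fmt.Println(decodePacked([]byte{0x02, 0x00, 0x01})) // [0 1] <nil>
}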
fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Created = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Canceled = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + } + m.CompactRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CancelReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &mvccpb.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if 
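// ---------------------------------------------------------------------------
// Editorial aside (sketch, not part of the vendored diff): the wire varint
// read at the top of each field loop above is a protobuf field key,
// (fieldNum << 3) | wireType. Wire type 0 is varint, 1 is fixed 64-bit, 2 is
// length-delimited, 5 is fixed 32-bit; 3 and 4 are the legacy start/end group
// markers, which is why the generated decoders reject wireType == 4 outright
// and treat fieldNum <= 0 as an illegal tag. splitKey is a hypothetical
// helper name for illustration.
package main

import "fmt"

func splitKey(key uint64) (fieldNum int32, wireType int) {
	return int32(key >> 3), int(key & 0x7)
}

func main() {
	// 0x0A is field 1 with wire type 2, e.g. the Header submessage above.
	f, w := splitKey(0x0A)
	fmt.Println(f, w) // 1 2
}
// ---------------------------------------------------------------------------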
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
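// ---------------------------------------------------------------------------
// Editorial aside (sketch, not part of the vendored diff): every wire type 2
// field above (CancelReason, Error, the Header submessages) follows one
// shape: decode a length varint, reject lengths that go negative when
// narrowed to int (ErrInvalidLengthRpc), bounds-check postIndex against the
// buffer, then slice. decodeString and decodeVarint are assumed helper names,
// with decodeVarint as sketched earlier.
package main

import (
	"errors"
	"fmt"
)

func decodeVarint(data []byte) (uint64, int, error) {
	var v uint64
	for n, shift := 0, uint(0); ; shift += 7 {
		if shift >= 64 || n >= len(data) {
			return 0, 0, errors.New("bad varint")
		}
		b := data[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

// decodeString decodes a length-delimited string at offset i and returns the
// string plus the offset just past it (the generated code's postIndex).
func decodeString(data []byte, i int) (string, int, error) {
	strLen, n, err := decodeVarint(data[i:])
	if err != nil {
		return "", 0, err
	}
	i += n
	post := i + int(strLen)
	if int(strLen) < 0 || post < 0 { // overflow guard, like ErrInvalidLengthRpc
		return "", 0, errors.New("invalid length")
	}
	if post > len(data) {
		return "", 0, errors.New("unexpected EOF")
	}
	return string(data[i:post]), post, nil
}

func main() {
	// length 5 followed by the bytes of "hello"
	s, next, err := decodeString([]byte{5, 'h', 'e', 'l', 'l', 'o'}, 0)
	fmt.Println(s, next, err) // hello 6 <nil>
}
// ---------------------------------------------------------------------------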
ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + 
} + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Keys = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType) + } + m.GrantedTTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GrantedTTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return 
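// ---------------------------------------------------------------------------
// Editorial aside (sketch, not part of the vendored diff): proto bools (Keys
// here, Created and Canceled in WatchResponse earlier) have no wire type of
// their own; they travel as varints, and the generated code decodes into an
// int and converts with bool(v != 0), so any nonzero varint reads back as
// true.
package main

import "fmt"

func main() {
	for _, raw := range []uint64{0, 1, 2} {
		fmt.Println(raw, "->", raw != 0) // 0 is false, anything else true
	}
}
// ---------------------------------------------------------------------------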
ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) + copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Member) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Member: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + 
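// ---------------------------------------------------------------------------
// Editorial aside (sketch, not part of the vendored diff): repeated fields
// (the [][]byte Keys here, the PeerURLs strings above) are simply the same
// field number occurring several times, one appended element per occurrence.
// Note that the generated code copies bytes out with make+copy rather than
// aliasing dAtA, so the decoded message stays valid if the input buffer is
// reused. A shape-only sketch of that append-per-occurrence pattern:
package main

import "fmt"

func main() {
	var keys [][]byte
	input := []byte("abcdef")
	for _, span := range [][2]int{{0, 3}, {3, 6}} { // two occurrences of the field
		k := make([]byte, span[1]-span[0])
		copy(k, input[span[0]:span[1]]) // copy, never alias the input buffer
		keys = append(keys, k)
	}
	input[0] = 'X' // mutating the buffer must not corrupt decoded keys
	fmt.Printf("%s %s\n", keys[0], keys[1]) // abc def
}
// ---------------------------------------------------------------------------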
if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Member == nil { + m.Member = &Member{} + } + if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberUpdateResponse) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType 
== 4 { + return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (AlarmRequest_AlarmAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) + } + m.MemberID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + m.Alarm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Alarm |= (AlarmType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmMember) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
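// ---------------------------------------------------------------------------
// Editorial aside (sketch, not part of the vendored diff): proto enums
// (AlarmType and AlarmRequest_AlarmAction above) are ordinary varints on the
// wire; the generated loop accumulates each byte directly into the named
// integer type, so numeric values with no declared constant still round-trip
// instead of failing. alarmType below is a hypothetical stand-in:
package main

import "fmt"

type alarmType int32

const (
	alarmNone    alarmType = 0
	alarmNospace alarmType = 1
)

func main() {
	b := byte(1) // a single-byte varint read off the wire
	var a alarmType
	a |= (alarmType(b) & 0x7F) << 0 // same per-byte accumulate as above
	fmt.Println(a == alarmNospace)  // true
}
// ---------------------------------------------------------------------------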
ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) + } + m.MemberID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + m.Alarm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Alarm |= (AlarmType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alarms = append(m.Alarms, &AlarmMember{}) + if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType) + } + m.DbSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.DbSize |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + m.Leader = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Leader |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) + } + m.RaftIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*AuthenticateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = 
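// ---------------------------------------------------------------------------
// Editorial aside (worked example, not the generated code): putting the
// pieces together for a small message like AuthenticateRequest, whose Name
// (field 1) and Password (field 2) are both wire type 2, Name:"root" and
// Password:"secret" encode as
// 0x0A 0x04 'r' 'o' 'o' 't' 0x12 0x06 's' 'e' 'c' 'r' 'e' 't'.
// The loop below mirrors the generated method's key/length/body rhythm,
// assuming single-byte varints for brevity.
package main

import "fmt"

func main() {
	data := []byte{0x0A, 4, 'r', 'o', 'o', 't', 0x12, 6, 's', 'e', 'c', 'r', 'e', 't'}
	var name, password string
	for i := 0; i < len(data); {
		key := uint64(data[i]) // assume single-byte key varint
		i++
		fieldNum, wireType := key>>3, key&0x7
		if wireType != 2 {
			panic("sketch only handles length-delimited fields")
		}
		ln := int(data[i]) // assume single-byte length varint
		i++
		body := string(data[i : i+ln])
		i += ln
		switch fieldNum {
		case 1:
			name = body
		case 2:
			password = body
		}
	}
	fmt.Println(name, password) // root secret
}
// ---------------------------------------------------------------------------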
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > 
l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Perm == nil { + m.Perm = &authpb.Permission{} + } + if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { + 
l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Perm = append(m.Perm, &authpb.Permission{}) + if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + 
	}
+	return nil
+}
+func skipRpc(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthRpc
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowRpc
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipRpc(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRpc   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) }
+
+var fileDescriptorRpc = []byte{
+	// 3450 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x3b, 0x5b, 0x6f, 0x1b, 0xc7,
+	0xb9, 0x5a, 0x5e, 0xc5, 0x8f, 0x17, 0xd1, 0x23, 0xd9, 0xa6, 0x68, 0x5b, 0x96, 0xc7, 0x37, 0xd9,
+	0x4e, 0xa4, 0x44, 0xc9, 0x39, 0x0f, 0x3e, 0x41, 0x70, 0x64, 0x89, 0xb1, 0x74, 0x24, 0x4b, 0xce,
+	0x4a, 0x76, 0x72, 0x80, 0xa0, 0xc4, 0x8a, 0x1c, 0x53, 0x0b, 0x91, 0xbb, 0xcc, 0xee, 0x92, 0x96,
+	0xd2, 0x14, 0x28, 0xd2, 0x04, 0x45, 0x0b, 0xf4, 0xa5, 0x79, 0xe8, 0xed, 0xb1, 0x28, 0x8a, 0xfc,
+	0x80, 0xbe, 0xf5, 0x07, 0x14, 0x7d, 0x69, 0x81, 0xfe, 0x81, 0x22, 0xed, 0x63, 0xdf, 0xfb, 0x54,
+	0xb4, 0x98, 0xdb, 0xee, 0xec, 0x72, 0x97, 0x52, 0xca, 0x26, 0x2f, 0xd6, 0xce, 0x37, 0xdf, 0x7c,
+	0xb7, 0x99, 0xef, 0x32, 0xdf, 0xd0, 0x50, 0x70, 0xfa, 0xad, 0xe5, 0xbe, 0x63, 0x7b, 0x36, 0x2a,
+	0x11, 0xaf, 0xd5, 0x76, 0x89, 0x33, 0x24, 0x4e, 0xff, 0xb0, 0x3e, 0xd7, 0xb1, 0x3b, 0x36, 0x9b,
+	0x58, 0xa1, 0x5f, 0x1c, 0xa7, 0x3e, 0x4f, 0x71, 0x56, 0x7a, 0xc3, 0x56, 0x8b, 0xfd, 0xd3, 0x3f,
+	0x5c, 0x39, 0x1e, 0x8a, 0xa9, 0x2b, 0x6c, 0xca, 0x18, 0x78, 0x47, 0xec, 0x9f, 0xfe, 0x21, 0xfb,
+	0x23, 0x26, 0xaf, 0x76, 0x6c, 0xbb, 0xd3, 0x25, 0x2b, 0x46, 0xdf, 0x5c, 0x31, 0x2c, 0xcb, 0xf6,
+	0x0c, 0xcf, 0xb4, 0x2d, 0x97, 0xcf, 0xe2, 0xcf, 0x34, 0xa8, 0xe8, 0xc4, 0xed, 0xdb, 0x96, 0x4b,
+	0x36, 0x89, 0xd1, 0x26, 0x0e, 0xba, 0x06, 0xd0, 0xea, 0x0e, 0x5c, 0x8f, 0x38, 0x4d, 0xb3, 0x5d,
+	0xd3, 0x16, 0xb5, 0xa5, 0x8c, 0x5e, 0x10, 0x90, 0xad, 0x36, 0xba, 0x02, 0x85, 0x1e, 0xe9, 0x1d,
+	0xf2,
0xd9, 0x14, 0x9b, 0x9d, 0xe6, 0x80, 0xad, 0x36, 0xaa, 0xc3, 0xb4, 0x43, 0x86, 0xa6, 0x6b, + 0xda, 0x56, 0x2d, 0xbd, 0xa8, 0x2d, 0xa5, 0x75, 0x7f, 0x4c, 0x17, 0x3a, 0xc6, 0x0b, 0xaf, 0xe9, + 0x11, 0xa7, 0x57, 0xcb, 0xf0, 0x85, 0x14, 0x70, 0x40, 0x9c, 0x1e, 0xfe, 0x34, 0x0b, 0x25, 0xdd, + 0xb0, 0x3a, 0x44, 0x27, 0x1f, 0x0e, 0x88, 0xeb, 0xa1, 0x2a, 0xa4, 0x8f, 0xc9, 0x29, 0x63, 0x5f, + 0xd2, 0xe9, 0x27, 0x5f, 0x6f, 0x75, 0x48, 0x93, 0x58, 0x9c, 0x71, 0x89, 0xae, 0xb7, 0x3a, 0xa4, + 0x61, 0xb5, 0xd1, 0x1c, 0x64, 0xbb, 0x66, 0xcf, 0xf4, 0x04, 0x57, 0x3e, 0x08, 0x89, 0x93, 0x89, + 0x88, 0xb3, 0x0e, 0xe0, 0xda, 0x8e, 0xd7, 0xb4, 0x9d, 0x36, 0x71, 0x6a, 0xd9, 0x45, 0x6d, 0xa9, + 0xb2, 0x7a, 0x6b, 0x59, 0xdd, 0x88, 0x65, 0x55, 0xa0, 0xe5, 0x7d, 0xdb, 0xf1, 0xf6, 0x28, 0xae, + 0x5e, 0x70, 0xe5, 0x27, 0x7a, 0x07, 0x8a, 0x8c, 0x88, 0x67, 0x38, 0x1d, 0xe2, 0xd5, 0x72, 0x8c, + 0xca, 0xed, 0x33, 0xa8, 0x1c, 0x30, 0x64, 0x9d, 0xb1, 0xe7, 0xdf, 0x08, 0x43, 0xc9, 0x25, 0x8e, + 0x69, 0x74, 0xcd, 0x8f, 0x8c, 0xc3, 0x2e, 0xa9, 0xe5, 0x17, 0xb5, 0xa5, 0x69, 0x3d, 0x04, 0xa3, + 0xfa, 0x1f, 0x93, 0x53, 0xb7, 0x69, 0x5b, 0xdd, 0xd3, 0xda, 0x34, 0x43, 0x98, 0xa6, 0x80, 0x3d, + 0xab, 0x7b, 0xca, 0x36, 0xcd, 0x1e, 0x58, 0x1e, 0x9f, 0x2d, 0xb0, 0xd9, 0x02, 0x83, 0xb0, 0xe9, + 0x25, 0xa8, 0xf6, 0x4c, 0xab, 0xd9, 0xb3, 0xdb, 0x4d, 0xdf, 0x20, 0xc0, 0x0c, 0x52, 0xe9, 0x99, + 0xd6, 0x13, 0xbb, 0xad, 0x4b, 0xb3, 0x50, 0x4c, 0xe3, 0x24, 0x8c, 0x59, 0x14, 0x98, 0xc6, 0x89, + 0x8a, 0xb9, 0x0c, 0xb3, 0x94, 0x66, 0xcb, 0x21, 0x86, 0x47, 0x02, 0xe4, 0x12, 0x43, 0xbe, 0xd0, + 0x33, 0xad, 0x75, 0x36, 0x13, 0xc2, 0x37, 0x4e, 0x46, 0xf0, 0xcb, 0x02, 0xdf, 0x38, 0x09, 0xe3, + 0xe3, 0x65, 0x28, 0xf8, 0x36, 0x47, 0xd3, 0x90, 0xd9, 0xdd, 0xdb, 0x6d, 0x54, 0xa7, 0x10, 0x40, + 0x6e, 0x6d, 0x7f, 0xbd, 0xb1, 0xbb, 0x51, 0xd5, 0x50, 0x11, 0xf2, 0x1b, 0x0d, 0x3e, 0x48, 0xe1, + 0x47, 0x00, 0x81, 0x75, 0x51, 0x1e, 0xd2, 0xdb, 0x8d, 0xff, 0xaf, 0x4e, 0x51, 0x9c, 0xe7, 0x0d, + 0x7d, 0x7f, 0x6b, 0x6f, 0xb7, 0xaa, 0xd1, 0xc5, 0xeb, 0x7a, 0x63, 0xed, 0xa0, 0x51, 0x4d, 0x51, + 0x8c, 0x27, 0x7b, 0x1b, 0xd5, 0x34, 0x2a, 0x40, 0xf6, 0xf9, 0xda, 0xce, 0xb3, 0x46, 0x35, 0x83, + 0x3f, 0xd7, 0xa0, 0x2c, 0xf6, 0x8b, 0xfb, 0x04, 0x7a, 0x13, 0x72, 0x47, 0xcc, 0x2f, 0xd8, 0x51, + 0x2c, 0xae, 0x5e, 0x8d, 0x6c, 0x6e, 0xc8, 0x77, 0x74, 0x81, 0x8b, 0x30, 0xa4, 0x8f, 0x87, 0x6e, + 0x2d, 0xb5, 0x98, 0x5e, 0x2a, 0xae, 0x56, 0x97, 0xb9, 0xc3, 0x2e, 0x6f, 0x93, 0xd3, 0xe7, 0x46, + 0x77, 0x40, 0x74, 0x3a, 0x89, 0x10, 0x64, 0x7a, 0xb6, 0x43, 0xd8, 0x89, 0x9d, 0xd6, 0xd9, 0x37, + 0x3d, 0xc6, 0x6c, 0xd3, 0xc4, 0x69, 0xe5, 0x03, 0xfc, 0x85, 0x06, 0xf0, 0x74, 0xe0, 0x25, 0xbb, + 0xc6, 0x1c, 0x64, 0x87, 0x94, 0xb0, 0x70, 0x0b, 0x3e, 0x60, 0x3e, 0x41, 0x0c, 0x97, 0xf8, 0x3e, + 0x41, 0x07, 0xe8, 0x32, 0xe4, 0xfb, 0x0e, 0x19, 0x36, 0x8f, 0x87, 0x8c, 0xc9, 0xb4, 0x9e, 0xa3, + 0xc3, 0xed, 0x21, 0xba, 0x01, 0x25, 0xb3, 0x63, 0xd9, 0x0e, 0x69, 0x72, 0x5a, 0x59, 0x36, 0x5b, + 0xe4, 0x30, 0x26, 0xb7, 0x82, 0xc2, 0x09, 0xe7, 0x54, 0x94, 0x1d, 0x0a, 0xc2, 0x16, 0x14, 0x99, + 0xa8, 0x13, 0x99, 0xef, 0x5e, 0x20, 0x63, 0x8a, 0x2d, 0x1b, 0x35, 0xa1, 0x90, 0x1a, 0x7f, 0x00, + 0x68, 0x83, 0x74, 0x89, 0x47, 0x26, 0x89, 0x1e, 0x8a, 0x4d, 0xd2, 0xaa, 0x4d, 0xf0, 0x8f, 0x35, + 0x98, 0x0d, 0x91, 0x9f, 0x48, 0xad, 0x1a, 0xe4, 0xdb, 0x8c, 0x18, 0x97, 0x20, 0xad, 0xcb, 0x21, + 0x7a, 0x00, 0xd3, 0x42, 0x00, 0xb7, 0x96, 0x4e, 0x38, 0x34, 0x79, 0x2e, 0x93, 0x8b, 0xff, 0xa6, + 0x41, 0x41, 0x28, 0xba, 0xd7, 0x47, 0x6b, 0x50, 0x76, 0xf8, 0xa0, 0xc9, 0xf4, 0x11, 0x12, 0xd5, + 0x93, 0x83, 0xd0, 0xe6, 0x94, 
0x5e, 0x12, 0x4b, 0x18, 0x18, 0xfd, 0x0f, 0x14, 0x25, 0x89, 0xfe, + 0xc0, 0x13, 0x26, 0xaf, 0x85, 0x09, 0x04, 0xe7, 0x6f, 0x73, 0x4a, 0x07, 0x81, 0xfe, 0x74, 0xe0, + 0xa1, 0x03, 0x98, 0x93, 0x8b, 0xb9, 0x36, 0x42, 0x8c, 0x34, 0xa3, 0xb2, 0x18, 0xa6, 0x32, 0xba, + 0x55, 0x9b, 0x53, 0x3a, 0x12, 0xeb, 0x95, 0xc9, 0x47, 0x05, 0xc8, 0x0b, 0x28, 0xfe, 0xbb, 0x06, + 0x20, 0x0d, 0xba, 0xd7, 0x47, 0x1b, 0x50, 0x71, 0xc4, 0x28, 0xa4, 0xf0, 0x95, 0x58, 0x85, 0xc5, + 0x3e, 0x4c, 0xe9, 0x65, 0xb9, 0x88, 0xab, 0xfc, 0x36, 0x94, 0x7c, 0x2a, 0x81, 0xce, 0xf3, 0x31, + 0x3a, 0xfb, 0x14, 0x8a, 0x72, 0x01, 0xd5, 0xfa, 0x3d, 0xb8, 0xe8, 0xaf, 0x8f, 0x51, 0xfb, 0xc6, + 0x18, 0xb5, 0x7d, 0x82, 0xb3, 0x92, 0x82, 0xaa, 0x38, 0xd0, 0x94, 0xc5, 0xc1, 0xf8, 0x8b, 0x34, + 0xe4, 0xd7, 0xed, 0x5e, 0xdf, 0x70, 0xe8, 0x1e, 0xe5, 0x1c, 0xe2, 0x0e, 0xba, 0x1e, 0x53, 0xb7, + 0xb2, 0x7a, 0x33, 0xcc, 0x41, 0xa0, 0xc9, 0xbf, 0x3a, 0x43, 0xd5, 0xc5, 0x12, 0xba, 0x58, 0x64, + 0xa8, 0xd4, 0x39, 0x16, 0x8b, 0xfc, 0x24, 0x96, 0x48, 0x5f, 0x4a, 0x07, 0xbe, 0x54, 0x87, 0xfc, + 0x90, 0x38, 0x41, 0x56, 0xdd, 0x9c, 0xd2, 0x25, 0x00, 0xdd, 0x83, 0x99, 0x68, 0x84, 0xcf, 0x0a, + 0x9c, 0x4a, 0x2b, 0x9c, 0x10, 0x6e, 0x42, 0x29, 0x94, 0x66, 0x72, 0x02, 0xaf, 0xd8, 0x53, 0xb2, + 0xcc, 0x25, 0x19, 0xda, 0x68, 0x4a, 0x2c, 0x6d, 0x4e, 0x89, 0xe0, 0x86, 0xff, 0x17, 0xca, 0x21, + 0x5d, 0x69, 0x14, 0x6f, 0xbc, 0xfb, 0x6c, 0x6d, 0x87, 0x87, 0xfc, 0xc7, 0x2c, 0xca, 0xeb, 0x55, + 0x8d, 0x66, 0x8e, 0x9d, 0xc6, 0xfe, 0x7e, 0x35, 0x85, 0xca, 0x50, 0xd8, 0xdd, 0x3b, 0x68, 0x72, + 0xac, 0x34, 0x7e, 0xcb, 0xa7, 0x20, 0x52, 0x86, 0x92, 0x29, 0xa6, 0x94, 0x4c, 0xa1, 0xc9, 0x4c, + 0x91, 0x0a, 0x32, 0x45, 0xfa, 0x51, 0x05, 0x4a, 0xdc, 0x3e, 0xcd, 0x81, 0x45, 0xb3, 0xd5, 0x2f, + 0x35, 0x80, 0x83, 0x13, 0x4b, 0x06, 0xa0, 0x15, 0xc8, 0xb7, 0x38, 0xf1, 0x9a, 0xc6, 0xfc, 0xf9, + 0x62, 0xac, 0xc9, 0x75, 0x89, 0x85, 0x5e, 0x87, 0xbc, 0x3b, 0x68, 0xb5, 0x88, 0x2b, 0xb3, 0xc6, + 0xe5, 0x68, 0x48, 0x11, 0x0e, 0xaf, 0x4b, 0x3c, 0xba, 0xe4, 0x85, 0x61, 0x76, 0x07, 0x2c, 0x87, + 0x8c, 0x5f, 0x22, 0xf0, 0xf0, 0xcf, 0x34, 0x28, 0x32, 0x29, 0x27, 0x8a, 0x63, 0x57, 0xa1, 0xc0, + 0x64, 0x20, 0x6d, 0x11, 0xc9, 0xa6, 0xf5, 0x00, 0x80, 0xfe, 0x1b, 0x0a, 0xf2, 0x04, 0xcb, 0x60, + 0x56, 0x8b, 0x27, 0xbb, 0xd7, 0xd7, 0x03, 0x54, 0xbc, 0x0d, 0x17, 0x98, 0x55, 0x5a, 0xb4, 0x3e, + 0x95, 0x76, 0x54, 0x2b, 0x38, 0x2d, 0x52, 0xc1, 0xd5, 0x61, 0xba, 0x7f, 0x74, 0xea, 0x9a, 0x2d, + 0xa3, 0x2b, 0xa4, 0xf0, 0xc7, 0xf8, 0xff, 0x00, 0xa9, 0xc4, 0x26, 0x51, 0x17, 0x97, 0xa1, 0xb8, + 0x69, 0xb8, 0x47, 0x42, 0x24, 0xfc, 0x3e, 0x94, 0xf8, 0x70, 0x22, 0x1b, 0x22, 0xc8, 0x1c, 0x19, + 0xee, 0x11, 0x13, 0xbc, 0xac, 0xb3, 0x6f, 0x7c, 0x01, 0x66, 0xf6, 0x2d, 0xa3, 0xef, 0x1e, 0xd9, + 0x32, 0xd6, 0xd2, 0xfa, 0xbc, 0x1a, 0xc0, 0x26, 0xe2, 0x78, 0x17, 0x66, 0x1c, 0xd2, 0x33, 0x4c, + 0xcb, 0xb4, 0x3a, 0xcd, 0xc3, 0x53, 0x8f, 0xb8, 0xa2, 0x7c, 0xaf, 0xf8, 0xe0, 0x47, 0x14, 0x4a, + 0x45, 0x3b, 0xec, 0xda, 0x87, 0xc2, 0xe3, 0xd9, 0x37, 0xfe, 0x8d, 0x06, 0xa5, 0xf7, 0x0c, 0xaf, + 0x25, 0xad, 0x80, 0xb6, 0xa0, 0xe2, 0xfb, 0x39, 0x83, 0x08, 0x59, 0x22, 0x01, 0x9f, 0xad, 0x91, + 0x85, 0x9d, 0x0c, 0xf8, 0xe5, 0x96, 0x0a, 0x60, 0xa4, 0x0c, 0xab, 0x45, 0xba, 0x3e, 0xa9, 0x54, + 0x32, 0x29, 0x86, 0xa8, 0x92, 0x52, 0x01, 0x8f, 0x66, 0x82, 0x64, 0xc8, 0xdd, 0xf2, 0xe7, 0x29, + 0x40, 0xa3, 0x32, 0x7c, 0xd5, 0xfa, 0xe0, 0x36, 0x54, 0x5c, 0xcf, 0x70, 0xbc, 0x66, 0xe4, 0x72, + 0x53, 0x66, 0x50, 0x3f, 0x56, 0xdd, 0x85, 0x99, 0xbe, 0x63, 0x77, 0x1c, 0xe2, 0xba, 0x4d, 0xcb, + 0xf6, 0xcc, 0x17, 0xa7, 0xa2, 0xc4, 0xaa, 0x48, 0xf0, 
0x2e, 0x83, 0xa2, 0x06, 0xe4, 0x5f, 0x98, + 0x5d, 0x8f, 0x38, 0x6e, 0x2d, 0xbb, 0x98, 0x5e, 0xaa, 0xac, 0x3e, 0x38, 0xcb, 0x6a, 0xcb, 0xef, + 0x30, 0xfc, 0x83, 0xd3, 0x3e, 0xd1, 0xe5, 0x5a, 0xb5, 0x6c, 0xc9, 0x85, 0xca, 0x96, 0xdb, 0x00, + 0x01, 0x3e, 0x8d, 0x5a, 0xbb, 0x7b, 0x4f, 0x9f, 0x1d, 0x54, 0xa7, 0x50, 0x09, 0xa6, 0x77, 0xf7, + 0x36, 0x1a, 0x3b, 0x0d, 0x1a, 0xd7, 0xf0, 0x8a, 0xb4, 0x8d, 0x6a, 0x43, 0x34, 0x0f, 0xd3, 0x2f, + 0x29, 0x54, 0xde, 0xfe, 0xd2, 0x7a, 0x9e, 0x8d, 0xb7, 0xda, 0xf8, 0x47, 0x29, 0x28, 0x8b, 0x53, + 0x30, 0xd1, 0x51, 0x54, 0x59, 0xa4, 0x42, 0x2c, 0x68, 0x8d, 0xc4, 0x4f, 0x47, 0x5b, 0x94, 0x62, + 0x72, 0x48, 0xdd, 0x9d, 0x6f, 0x36, 0x69, 0x0b, 0xb3, 0xfa, 0x63, 0x74, 0x0f, 0xaa, 0x2d, 0xee, + 0xee, 0x91, 0xb4, 0xa3, 0xcf, 0x08, 0xb8, 0x92, 0x75, 0xca, 0xfe, 0x69, 0x33, 0x5c, 0x91, 0x76, + 0x0a, 0x7a, 0x49, 0x1e, 0x24, 0x0a, 0x43, 0xb7, 0x21, 0x47, 0x86, 0xc4, 0xf2, 0xdc, 0x5a, 0x91, + 0x05, 0xb0, 0xb2, 0xac, 0xc6, 0x1a, 0x14, 0xaa, 0x8b, 0x49, 0xfc, 0x5f, 0x70, 0x81, 0x55, 0xbd, + 0x8f, 0x1d, 0xc3, 0x52, 0xcb, 0xf3, 0x83, 0x83, 0x1d, 0x61, 0x3a, 0xfa, 0x89, 0x2a, 0x90, 0xda, + 0xda, 0x10, 0x8a, 0xa6, 0xb6, 0x36, 0xf0, 0x27, 0x1a, 0x20, 0x75, 0xdd, 0x44, 0xb6, 0x8c, 0x10, + 0x97, 0xec, 0xd3, 0x01, 0xfb, 0x39, 0xc8, 0x12, 0xc7, 0xb1, 0x1d, 0x66, 0xb5, 0x82, 0xce, 0x07, + 0xf8, 0x96, 0x90, 0x41, 0x27, 0x43, 0xfb, 0xd8, 0x77, 0x0c, 0x4e, 0x4d, 0xf3, 0x45, 0xdd, 0x86, + 0xd9, 0x10, 0xd6, 0x44, 0x81, 0xf4, 0x2e, 0x5c, 0x64, 0xc4, 0xb6, 0x09, 0xe9, 0xaf, 0x75, 0xcd, + 0x61, 0x22, 0xd7, 0x3e, 0x5c, 0x8a, 0x22, 0x7e, 0xbd, 0x36, 0xc2, 0x6f, 0x09, 0x8e, 0x07, 0x66, + 0x8f, 0x1c, 0xd8, 0x3b, 0xc9, 0xb2, 0xd1, 0xe8, 0x48, 0x6f, 0xdd, 0x22, 0xe3, 0xb0, 0x6f, 0xfc, + 0x2b, 0x0d, 0x2e, 0x8f, 0x2c, 0xff, 0x9a, 0x77, 0x75, 0x01, 0xa0, 0x43, 0x8f, 0x0f, 0x69, 0xd3, + 0x09, 0x7e, 0x5f, 0x54, 0x20, 0xbe, 0x9c, 0x34, 0xc0, 0x94, 0x84, 0x9c, 0x47, 0x90, 0x7b, 0xc2, + 0x5a, 0x35, 0x8a, 0x56, 0x19, 0xa9, 0x95, 0x65, 0xf4, 0xf8, 0x05, 0xb2, 0xa0, 0xb3, 0x6f, 0x96, + 0x5f, 0x09, 0x71, 0x9e, 0xe9, 0x3b, 0x3c, 0x8f, 0x17, 0x74, 0x7f, 0x4c, 0xb9, 0xb7, 0xba, 0x26, + 0xb1, 0x3c, 0x36, 0x9b, 0x61, 0xb3, 0x0a, 0x04, 0x2f, 0x43, 0x95, 0x73, 0x5a, 0x6b, 0xb7, 0x95, + 0x5c, 0xee, 0xd3, 0xd3, 0xc2, 0xf4, 0xf0, 0xaf, 0x35, 0xb8, 0xa0, 0x2c, 0x98, 0xc8, 0x76, 0xaf, + 0x40, 0x8e, 0x37, 0xa4, 0x44, 0x1e, 0x99, 0x0b, 0xaf, 0xe2, 0x6c, 0x74, 0x81, 0x83, 0x96, 0x21, + 0xcf, 0xbf, 0x64, 0xb1, 0x12, 0x8f, 0x2e, 0x91, 0xf0, 0x6d, 0x98, 0x15, 0x20, 0xd2, 0xb3, 0xe3, + 0x8e, 0x09, 0x33, 0x28, 0xfe, 0x18, 0xe6, 0xc2, 0x68, 0x13, 0xa9, 0xa4, 0x08, 0x99, 0x3a, 0x8f, + 0x90, 0x6b, 0x52, 0xc8, 0x67, 0xfd, 0xb6, 0x92, 0xf6, 0xa2, 0xbb, 0xae, 0xee, 0x48, 0x2a, 0xb2, + 0x23, 0xbe, 0x02, 0x92, 0xc4, 0x37, 0xaa, 0xc0, 0xac, 0x3c, 0x0e, 0x3b, 0xa6, 0xeb, 0x17, 0x43, + 0x1f, 0x01, 0x52, 0x81, 0xdf, 0xb4, 0x40, 0x1b, 0xe4, 0x85, 0x63, 0x74, 0x7a, 0xc4, 0x0f, 0xf5, + 0xb4, 0xca, 0x54, 0x81, 0x13, 0x05, 0xc7, 0x3f, 0x68, 0x50, 0x5a, 0xeb, 0x1a, 0x4e, 0x4f, 0x6e, + 0xd6, 0xdb, 0x90, 0xe3, 0xe5, 0xab, 0xb8, 0xf1, 0xdd, 0x09, 0x93, 0x51, 0x71, 0xf9, 0x60, 0x8d, + 0x17, 0xbb, 0x62, 0x15, 0xdd, 0x5c, 0xd1, 0x97, 0xdd, 0x88, 0xf4, 0x69, 0x37, 0xd0, 0xab, 0x90, + 0x35, 0xe8, 0x12, 0x16, 0x50, 0x2a, 0xd1, 0x8b, 0x03, 0xa3, 0xc6, 0x4a, 0x0d, 0x8e, 0x85, 0xdf, + 0x84, 0xa2, 0xc2, 0x81, 0xde, 0x87, 0x1e, 0x37, 0x44, 0x39, 0xb1, 0xb6, 0x7e, 0xb0, 0xf5, 0x9c, + 0x5f, 0x93, 0x2a, 0x00, 0x1b, 0x0d, 0x7f, 0x9c, 0xc2, 0xef, 0x8b, 0x55, 0x22, 0xe4, 0xa8, 0xf2, + 0x68, 0x49, 0xf2, 0xa4, 0xce, 0x25, 0xcf, 0x09, 0x94, 0x85, 0xfa, 0x13, 0x9d, 
0x81, 0xd7, 0x21, + 0xc7, 0xe8, 0xc9, 0x23, 0x30, 0x1f, 0xc3, 0x56, 0x46, 0x0b, 0x8e, 0x88, 0x67, 0xa0, 0xbc, 0xef, + 0x19, 0xde, 0xc0, 0x95, 0x47, 0xe0, 0xf7, 0x1a, 0x54, 0x24, 0x64, 0xd2, 0xe6, 0x90, 0xbc, 0x54, + 0xf3, 0x20, 0xec, 0x5f, 0xa9, 0x2f, 0x41, 0xae, 0x7d, 0xb8, 0x6f, 0x7e, 0x24, 0x1b, 0x79, 0x62, + 0x44, 0xe1, 0x5d, 0xce, 0x87, 0x77, 0xd3, 0xc5, 0x88, 0x5e, 0xcf, 0x1c, 0xe3, 0x85, 0xb7, 0x65, + 0xb5, 0xc9, 0x09, 0xab, 0x82, 0x32, 0x7a, 0x00, 0x60, 0x37, 0x2a, 0xd1, 0x75, 0x67, 0xa5, 0x8f, + 0xda, 0x85, 0x9f, 0x85, 0x0b, 0x6b, 0x03, 0xef, 0xa8, 0x61, 0x19, 0x87, 0x5d, 0x19, 0x34, 0xf0, + 0x1c, 0x20, 0x0a, 0xdc, 0x30, 0x5d, 0x15, 0xda, 0x80, 0x59, 0x0a, 0x25, 0x96, 0x67, 0xb6, 0x94, + 0x08, 0x23, 0xf3, 0x88, 0x16, 0xc9, 0x23, 0x86, 0xeb, 0xbe, 0xb4, 0x9d, 0xb6, 0x50, 0xcd, 0x1f, + 0xe3, 0x0d, 0x4e, 0xfc, 0x99, 0x1b, 0xca, 0x14, 0x5f, 0x95, 0xca, 0x52, 0x40, 0xe5, 0x31, 0xf1, + 0xc6, 0x50, 0xc1, 0x0f, 0xe0, 0xa2, 0xc4, 0x14, 0x5d, 0x97, 0x31, 0xc8, 0x7b, 0x70, 0x4d, 0x22, + 0xaf, 0x1f, 0xd1, 0xbb, 0xc0, 0x53, 0xc1, 0xf0, 0xdf, 0x95, 0xf3, 0x11, 0xd4, 0x7c, 0x39, 0x59, + 0xe9, 0x67, 0x77, 0x55, 0x01, 0x06, 0xae, 0x38, 0x33, 0x05, 0x9d, 0x7d, 0x53, 0x98, 0x63, 0x77, + 0xfd, 0xac, 0x4c, 0xbf, 0xf1, 0x3a, 0xcc, 0x4b, 0x1a, 0xa2, 0x28, 0x0b, 0x13, 0x19, 0x11, 0x28, + 0x8e, 0x88, 0x30, 0x18, 0x5d, 0x3a, 0xde, 0xec, 0x2a, 0x66, 0xd8, 0xb4, 0x8c, 0xa6, 0xa6, 0xd0, + 0xbc, 0xc8, 0x4f, 0x04, 0x15, 0x4c, 0x0d, 0xda, 0x02, 0x4c, 0x09, 0xa8, 0x60, 0xb1, 0x11, 0x14, + 0x3c, 0xb2, 0x11, 0x23, 0xa4, 0x3f, 0x80, 0x05, 0x5f, 0x08, 0x6a, 0xb7, 0xa7, 0xc4, 0xe9, 0x99, + 0xae, 0xab, 0xf4, 0x09, 0xe2, 0x14, 0xbf, 0x03, 0x99, 0x3e, 0x11, 0x31, 0xa5, 0xb8, 0x8a, 0x96, + 0xf9, 0xdb, 0xd8, 0xb2, 0xb2, 0x98, 0xcd, 0xe3, 0x36, 0x5c, 0x97, 0xd4, 0xb9, 0x45, 0x63, 0xc9, + 0x47, 0x85, 0x92, 0x77, 0x48, 0x6e, 0xd6, 0xd1, 0x3b, 0x64, 0x9a, 0xef, 0xbd, 0xbc, 0x43, 0xd2, + 0x5c, 0xa1, 0xfa, 0xd6, 0x44, 0xb9, 0x62, 0x9b, 0xdb, 0xd4, 0x77, 0xc9, 0x89, 0x88, 0x1d, 0xc2, + 0x5c, 0xd8, 0x93, 0x27, 0x0a, 0x63, 0x73, 0x90, 0xf5, 0xec, 0x63, 0x22, 0x83, 0x18, 0x1f, 0x48, + 0x81, 0x7d, 0x37, 0x9f, 0x48, 0x60, 0x23, 0x20, 0xc6, 0x8e, 0xe4, 0xa4, 0xf2, 0xd2, 0xdd, 0x94, + 0xf5, 0x0f, 0x1f, 0xe0, 0x5d, 0xb8, 0x14, 0x0d, 0x13, 0x13, 0x89, 0xfc, 0x9c, 0x1f, 0xe0, 0xb8, + 0x48, 0x32, 0x11, 0xdd, 0x77, 0x83, 0x60, 0xa0, 0x04, 0x94, 0x89, 0x48, 0xea, 0x50, 0x8f, 0x8b, + 0x2f, 0xff, 0x89, 0xf3, 0xea, 0x87, 0x9b, 0x89, 0x88, 0xb9, 0x01, 0xb1, 0xc9, 0xb7, 0x3f, 0x88, + 0x11, 0xe9, 0xb1, 0x31, 0x42, 0x38, 0x49, 0x10, 0xc5, 0xbe, 0x86, 0x43, 0x27, 0x78, 0x04, 0x01, + 0x74, 0x52, 0x1e, 0x34, 0x87, 0xf8, 0x3c, 0xd8, 0x40, 0x1e, 0x6c, 0x35, 0xec, 0x4e, 0xb4, 0x19, + 0xef, 0x05, 0xb1, 0x73, 0x24, 0x32, 0x4f, 0x44, 0xf8, 0x7d, 0x58, 0x4c, 0x0e, 0xca, 0x93, 0x50, + 0xbe, 0x8f, 0xa1, 0xe0, 0x17, 0x94, 0xca, 0xbb, 0x72, 0x11, 0xf2, 0xbb, 0x7b, 0xfb, 0x4f, 0xd7, + 0xd6, 0x1b, 0x55, 0x6d, 0xf5, 0x1f, 0x69, 0x48, 0x6d, 0x3f, 0x47, 0xdf, 0x82, 0x2c, 0x7f, 0x2e, + 0x1a, 0xf3, 0x9a, 0x56, 0x1f, 0xf7, 0xf0, 0x84, 0xaf, 0x7e, 0xf2, 0xa7, 0xbf, 0x7e, 0x9e, 0xba, + 0x84, 0x2f, 0xac, 0x0c, 0xdf, 0x30, 0xba, 0xfd, 0x23, 0x63, 0xe5, 0x78, 0xb8, 0xc2, 0x72, 0xc2, + 0x43, 0xed, 0x3e, 0x7a, 0x0e, 0xe9, 0xa7, 0x03, 0x0f, 0x25, 0x3e, 0xb5, 0xd5, 0x93, 0x1f, 0xa4, + 0x70, 0x9d, 0x51, 0x9e, 0xc3, 0x33, 0x2a, 0xe5, 0xfe, 0xc0, 0xa3, 0x74, 0x87, 0x50, 0x54, 0xde, + 0x94, 0xd0, 0x99, 0x8f, 0x70, 0xf5, 0xb3, 0xdf, 0xab, 0x30, 0x66, 0xfc, 0xae, 0xe2, 0xcb, 0x2a, + 0x3f, 0xfe, 0xf4, 0xa5, 0xea, 0x73, 0x70, 0x62, 0x45, 0xf5, 0x09, 0x9e, 0x45, 0xa2, 0xfa, 0x28, + 0x4f, 
0x11, 0xf1, 0xfa, 0x78, 0x27, 0x16, 0xa5, 0x6b, 0x8b, 0x77, 0xb0, 0x96, 0x87, 0xae, 0xc7, + 0xbc, 0xa3, 0xa8, 0x2f, 0x06, 0xf5, 0xc5, 0x64, 0x04, 0xc1, 0xe9, 0x06, 0xe3, 0x74, 0x05, 0x5f, + 0x52, 0x39, 0xb5, 0x7c, 0xbc, 0x87, 0xda, 0xfd, 0xd5, 0x23, 0xc8, 0xb2, 0x3e, 0x27, 0x6a, 0xca, + 0x8f, 0x7a, 0x4c, 0x87, 0x36, 0xe1, 0x04, 0x84, 0x3a, 0xa4, 0x78, 0x9e, 0x71, 0x9b, 0xc5, 0x15, + 0x9f, 0x1b, 0x6b, 0x75, 0x3e, 0xd4, 0xee, 0x2f, 0x69, 0xaf, 0x69, 0xab, 0xdf, 0xcb, 0x40, 0x96, + 0xb5, 0x8e, 0x50, 0x1f, 0x20, 0x68, 0x0a, 0x46, 0xf5, 0x1c, 0x69, 0x33, 0x46, 0xf5, 0x1c, 0xed, + 0x27, 0xe2, 0xeb, 0x8c, 0xf3, 0x3c, 0x9e, 0xf3, 0x39, 0xb3, 0x57, 0xfb, 0x15, 0xd6, 0x24, 0xa2, + 0x66, 0x7d, 0x09, 0x45, 0xa5, 0xb9, 0x87, 0xe2, 0x28, 0x86, 0xba, 0x83, 0xd1, 0x63, 0x12, 0xd3, + 0x19, 0xc4, 0x37, 0x19, 0xd3, 0x6b, 0xb8, 0xa6, 0x1a, 0x97, 0xf3, 0x75, 0x18, 0x26, 0x65, 0xfc, + 0xa9, 0x06, 0x95, 0x70, 0x83, 0x0f, 0xdd, 0x8c, 0x21, 0x1d, 0xed, 0x13, 0xd6, 0x6f, 0x8d, 0x47, + 0x4a, 0x14, 0x81, 0xf3, 0x3f, 0x26, 0xa4, 0x6f, 0x50, 0x4c, 0x61, 0x7b, 0xf4, 0x7d, 0x0d, 0x66, + 0x22, 0x6d, 0x3b, 0x14, 0xc7, 0x62, 0xa4, 0x29, 0x58, 0xbf, 0x7d, 0x06, 0x96, 0x90, 0xe4, 0x2e, + 0x93, 0xe4, 0x06, 0xbe, 0x3a, 0x6a, 0x0c, 0xcf, 0xec, 0x11, 0xcf, 0x16, 0xd2, 0xac, 0xfe, 0x33, + 0x0d, 0xf9, 0x75, 0xfe, 0x13, 0x2b, 0xe4, 0x41, 0xc1, 0xef, 0x84, 0xa1, 0x85, 0xb8, 0xae, 0x44, + 0x50, 0xb2, 0xd7, 0xaf, 0x27, 0xce, 0x0b, 0x11, 0xee, 0x30, 0x11, 0x16, 0xf1, 0x15, 0x5f, 0x04, + 0xf1, 0x53, 0xae, 0x15, 0x7e, 0xf9, 0x5e, 0x31, 0xda, 0x6d, 0xba, 0x25, 0xdf, 0xd5, 0xa0, 0xa4, + 0x36, 0xac, 0xd0, 0x8d, 0xd8, 0x7e, 0x88, 0xda, 0xf3, 0xaa, 0xe3, 0x71, 0x28, 0x82, 0xff, 0x3d, + 0xc6, 0xff, 0x26, 0x5e, 0x48, 0xe2, 0xef, 0x30, 0xfc, 0xb0, 0x08, 0xbc, 0xe5, 0x14, 0x2f, 0x42, + 0xa8, 0xa3, 0x15, 0x2f, 0x42, 0xb8, 0x63, 0x75, 0xb6, 0x08, 0x03, 0x86, 0x4f, 0x45, 0x38, 0x01, + 0x08, 0x3a, 0x4c, 0x28, 0xd6, 0xb8, 0xca, 0x25, 0x26, 0xea, 0x83, 0xa3, 0xcd, 0xa9, 0x98, 0x13, + 0x10, 0xe1, 0xdd, 0x35, 0x5d, 0xea, 0x8b, 0xab, 0xbf, 0xcd, 0x40, 0xf1, 0x89, 0x61, 0x5a, 0x1e, + 0xb1, 0x0c, 0xab, 0x45, 0x50, 0x07, 0xb2, 0x2c, 0x4b, 0x45, 0x03, 0x8f, 0xda, 0xf6, 0x89, 0x06, + 0x9e, 0x50, 0x4f, 0x04, 0xdf, 0x66, 0xac, 0xaf, 0xe3, 0xba, 0xcf, 0xba, 0x17, 0xd0, 0x5f, 0x61, + 0xfd, 0x0c, 0xaa, 0xf2, 0x31, 0xe4, 0x78, 0xff, 0x02, 0x45, 0xa8, 0x85, 0xfa, 0x1c, 0xf5, 0xab, + 0xf1, 0x93, 0x89, 0xa7, 0x4c, 0xe5, 0xe5, 0x32, 0x64, 0xca, 0xec, 0xdb, 0x00, 0x41, 0xc3, 0x2c, + 0x6a, 0xdf, 0x91, 0xfe, 0x5a, 0x7d, 0x31, 0x19, 0x41, 0x30, 0xbe, 0xcf, 0x18, 0xdf, 0xc2, 0xd7, + 0x63, 0x19, 0xb7, 0xfd, 0x05, 0x94, 0x79, 0x0b, 0x32, 0x9b, 0x86, 0x7b, 0x84, 0x22, 0x49, 0x48, + 0x79, 0xdb, 0xad, 0xd7, 0xe3, 0xa6, 0x04, 0xab, 0x5b, 0x8c, 0xd5, 0x02, 0x9e, 0x8f, 0x65, 0x75, + 0x64, 0xb8, 0x34, 0xa6, 0xa3, 0x01, 0x4c, 0xcb, 0xf7, 0x5a, 0x74, 0x2d, 0x62, 0xb3, 0xf0, 0xdb, + 0x6e, 0x7d, 0x21, 0x69, 0x5a, 0x30, 0x5c, 0x62, 0x0c, 0x31, 0xbe, 0x16, 0x6f, 0x54, 0x81, 0xfe, + 0x50, 0xbb, 0xff, 0x9a, 0xb6, 0xfa, 0xc3, 0x2a, 0x64, 0x68, 0xbd, 0x44, 0xb3, 0x48, 0x70, 0xcd, + 0x8c, 0x5a, 0x78, 0xa4, 0xb9, 0x13, 0xb5, 0xf0, 0xe8, 0x0d, 0x35, 0x26, 0x8b, 0xb0, 0x1f, 0x9a, + 0x12, 0x86, 0x45, 0x35, 0xf6, 0xa0, 0xa8, 0x5c, 0x46, 0x51, 0x0c, 0xc5, 0x70, 0xeb, 0x28, 0x9a, + 0x45, 0x62, 0x6e, 0xb2, 0x78, 0x91, 0x31, 0xad, 0xe3, 0x8b, 0x61, 0xa6, 0x6d, 0x8e, 0x46, 0xb9, + 0x7e, 0x0c, 0x25, 0xf5, 0xd6, 0x8a, 0x62, 0x88, 0x46, 0x7a, 0x53, 0xd1, 0x58, 0x11, 0x77, 0xe9, + 0x8d, 0x71, 0x1a, 0xff, 0x67, 0xb5, 0x12, 0x97, 0x72, 0xff, 0x10, 0xf2, 0xe2, 0x2e, 0x1b, 0xa7, + 0x6f, 0xb8, 0x9b, 0x15, 0xa7, 
0x6f, 0xe4, 0x22, 0x1c, 0x53, 0x92, 0x30, 0xb6, 0xb4, 0x66, 0x97, + 0x01, 0x5a, 0xb0, 0x7c, 0x4c, 0xbc, 0x24, 0x96, 0x41, 0x7f, 0x26, 0x89, 0xa5, 0x72, 0x5f, 0x1a, + 0xcb, 0xb2, 0x43, 0x3c, 0x71, 0x96, 0xe5, 0x65, 0x04, 0x25, 0x50, 0x54, 0xa3, 0x21, 0x1e, 0x87, + 0x92, 0x58, 0x45, 0x06, 0x5c, 0x45, 0x28, 0x44, 0xdf, 0x01, 0x08, 0x2e, 0xde, 0xd1, 0xc2, 0x20, + 0xb6, 0x7b, 0x17, 0x2d, 0x0c, 0xe2, 0xef, 0xee, 0x31, 0x1e, 0x1c, 0x30, 0xe7, 0x95, 0x2c, 0x65, + 0xff, 0x13, 0x0d, 0xd0, 0xe8, 0x45, 0x1d, 0x3d, 0x88, 0x67, 0x11, 0xdb, 0x18, 0xac, 0xbf, 0x72, + 0x3e, 0xe4, 0xc4, 0xe8, 0x19, 0xc8, 0xd5, 0x62, 0x4b, 0xfa, 0x2f, 0xa9, 0x64, 0x9f, 0x69, 0x50, + 0x0e, 0x5d, 0xf5, 0xd1, 0x9d, 0x84, 0x7d, 0x8e, 0x34, 0x17, 0xeb, 0x77, 0xcf, 0xc4, 0x4b, 0xac, + 0x9d, 0x94, 0x53, 0x21, 0xeb, 0xc6, 0x1f, 0x68, 0x50, 0x09, 0xf7, 0x07, 0x50, 0x02, 0x83, 0x91, + 0x0e, 0x65, 0x7d, 0xe9, 0x6c, 0xc4, 0x73, 0xec, 0x56, 0x50, 0x4a, 0x7e, 0x08, 0x79, 0xd1, 0x56, + 0x88, 0x73, 0x8b, 0x70, 0x83, 0x33, 0xce, 0x2d, 0x22, 0x3d, 0x89, 0x24, 0xb7, 0xa0, 0x37, 0x74, + 0xc5, 0x13, 0x45, 0xf3, 0x21, 0x89, 0xe5, 0x78, 0x4f, 0x8c, 0x74, 0x2e, 0xc6, 0xb2, 0x0c, 0x3c, + 0x51, 0xb6, 0x1e, 0x50, 0x02, 0xc5, 0x33, 0x3c, 0x31, 0xda, 0xb9, 0x48, 0xf2, 0x44, 0xc6, 0x55, + 0xf1, 0xc4, 0xa0, 0x53, 0x10, 0xe7, 0x89, 0x23, 0xed, 0xdb, 0x38, 0x4f, 0x1c, 0x6d, 0x36, 0x24, + 0xed, 0x2d, 0x63, 0x1e, 0xf2, 0xc4, 0xd9, 0x98, 0xce, 0x02, 0x7a, 0x25, 0xc1, 0xa6, 0xb1, 0xad, + 0xe1, 0xfa, 0xab, 0xe7, 0xc4, 0x1e, 0xef, 0x01, 0x7c, 0x37, 0xa4, 0x07, 0xfc, 0x42, 0x83, 0xb9, + 0xb8, 0xd6, 0x04, 0x4a, 0x60, 0x96, 0xd0, 0x57, 0xae, 0x2f, 0x9f, 0x17, 0xfd, 0x1c, 0x76, 0xf3, + 0x7d, 0xe2, 0x51, 0xf5, 0x77, 0x5f, 0x2e, 0x68, 0x7f, 0xfc, 0x72, 0x41, 0xfb, 0xf3, 0x97, 0x0b, + 0xda, 0x4f, 0xff, 0xb2, 0x30, 0x75, 0x98, 0x63, 0xff, 0xdb, 0xe3, 0x8d, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0x63, 0x1c, 0x78, 0x24, 0x74, 0x32, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto new file mode 100644 index 000000000000..a6cd00ab7c3e --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto @@ -0,0 +1,984 @@ +syntax = "proto3"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; +import "etcd/mvcc/mvccpb/kv.proto"; +import "etcd/auth/authpb/auth.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +service KV { + // Range gets the keys in the range from the key-value store. + rpc Range(RangeRequest) returns (RangeResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/range" + body: "*" + }; + } + + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + rpc Put(PutRequest) returns (PutResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/put" + body: "*" + }; + } + + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/deleterange" + body: "*" + }; + } + + // Txn processes multiple requests in a single transaction. 
+ // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + rpc Txn(TxnRequest) returns (TxnResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/txn" + body: "*" + }; + } + + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. + rpc Compact(CompactionRequest) returns (CompactionResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/compaction" + body: "*" + }; + } +} + +service Watch { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + rpc Watch(stream WatchRequest) returns (stream WatchResponse) { + option (google.api.http) = { + post: "/v3alpha/watch" + body: "*" + }; + } +} + +service Lease { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. + rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { + option (google.api.http) = { + post: "/v3alpha/lease/grant" + body: "*" + }; + } + + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/lease/revoke" + body: "*" + }; + } + + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { + option (google.api.http) = { + post: "/v3alpha/lease/keepalive" + body: "*" + }; + } + + // LeaseTimeToLive retrieves lease information. + rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/lease/timetolive" + body: "*" + }; + } + + // TODO(xiangli) List all existing Leases? +} + +service Cluster { + // MemberAdd adds a member into the cluster. + rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { + option (google.api.http) = { + post: "/v3alpha/cluster/member/add" + body: "*" + }; + } + + // MemberRemove removes an existing member from the cluster. + rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { + option (google.api.http) = { + post: "/v3alpha/cluster/member/remove" + body: "*" + }; + } + + // MemberUpdate updates the member configuration. + rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { + option (google.api.http) = { + post: "/v3alpha/cluster/member/update" + body: "*" + }; + } + + // MemberList lists all the members in the cluster. 
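The KV, Watch, Lease, Cluster, and Maintenance services defined above are normally consumed through the clientv3 package vendored by this change rather than through hand-written gRPC stubs. As a rough sketch of the KV side only (the endpoint and key names are illustrative, not part of this diff):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Illustrative endpoint; any reachable cluster member works.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// Put sends a PutRequest; Get sends a single-key RangeRequest.
	if _, err := cli.Put(ctx, "foo", "bar"); err != nil {
		panic(err)
	}
	resp, err := cli.Get(ctx, "foo")
	if err != nil {
		panic(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s (mod_revision=%d)\n", kv.Key, kv.Value, kv.ModRevision)
	}
}

The later sketches reuse cli and ctx from this one.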
+ rpc MemberList(MemberListRequest) returns (MemberListResponse) { + option (google.api.http) = { + post: "/v3alpha/cluster/member/list" + body: "*" + }; + } +} + +service Maintenance { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + rpc Alarm(AlarmRequest) returns (AlarmResponse) { + option (google.api.http) = { + post: "/v3alpha/maintenance/alarm" + body: "*" + }; + } + + // Status gets the status of the member. + rpc Status(StatusRequest) returns (StatusResponse) { + option (google.api.http) = { + post: "/v3alpha/maintenance/status" + body: "*" + }; + } + + // Defragment defragments a member's backend database to recover storage space. + rpc Defragment(DefragmentRequest) returns (DefragmentResponse) { + option (google.api.http) = { + post: "/v3alpha/maintenance/defragment" + body: "*" + }; + } + + // Hash returns the hash of the local KV state for consistency checking purposes. + // This is designed for testing; do not use this in production when there + // are ongoing transactions. + rpc Hash(HashRequest) returns (HashResponse) { + option (google.api.http) = { + post: "/v3alpha/maintenance/hash" + body: "*" + }; + } + + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) { + option (google.api.http) = { + post: "/v3alpha/maintenance/snapshot" + body: "*" + }; + } +} + +service Auth { + // AuthEnable enables authentication. + rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/enable" + body: "*" + }; + } + + // AuthDisable disables authentication. + rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/disable" + body: "*" + }; + } + + // Authenticate processes an authenticate request. + rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/authenticate" + body: "*" + }; + } + + // UserAdd adds a new user. + rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/user/add" + body: "*" + }; + } + + // UserGet gets detailed user information. + rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/user/get" + body: "*" + }; + } + + // UserList gets a list of all users. + rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/user/list" + body: "*" + }; + } + + // UserDelete deletes a specified user. + rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/user/delete" + body: "*" + }; + } + + // UserChangePassword changes the password of a specified user. + rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/user/changepw" + body: "*" + }; + } + + // UserGrantRole grants a role to a specified user. + rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/user/grant" + body: "*" + }; + } + + // UserRevokeRole revokes a role from a specified user.
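The Cluster and Maintenance services map onto the Cluster and Maintenance interfaces of the vendored clientv3. A sketch reusing cli and ctx from the first example (the peer URL below is illustrative):

// MemberAdd sends a MemberAddRequest; the new member must then be started
// with the returned cluster configuration before it can serve.
addResp, err := cli.MemberAdd(ctx, []string{"http://10.0.0.4:2380"})
if err != nil {
	panic(err)
}
fmt.Printf("added member %x, cluster now has %d members\n", addResp.Member.ID, len(addResp.Members))

// Status sends a StatusRequest to one specific member.
status, err := cli.Status(ctx, "localhost:2379")
if err != nil {
	panic(err)
}
fmt.Printf("db size %d bytes, leader %x\n", status.DbSize, status.Leader)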
+ rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/user/revoke" + body: "*" + }; + } + + // RoleAdd adds a new role. + rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/role/add" + body: "*" + }; + } + + // RoleGet gets detailed role information. + rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/role/get" + body: "*" + }; + } + + // RoleList gets lists of all roles. + rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/role/list" + body: "*" + }; + } + + // RoleDelete deletes a specified role. + rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/role/delete" + body: "*" + }; + } + + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/role/grant" + body: "*" + }; + } + + // RoleRevokePermission revokes a key or range permission of a specified role. + rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) { + option (google.api.http) = { + post: "/v3alpha/auth/role/revoke" + body: "*" + }; + } +} + +message ResponseHeader { + // cluster_id is the ID of the cluster which sent the response. + uint64 cluster_id = 1; + // member_id is the ID of the member which sent the response. + uint64 member_id = 2; + // revision is the key-value store revision when the request was applied. + int64 revision = 3; + // raft_term is the raft term when the request was applied. + uint64 raft_term = 4; +} + +message RangeRequest { + enum SortOrder { + NONE = 0; // default, no sorting + ASCEND = 1; // lowest target value first + DESCEND = 2; // highest target value first + } + enum SortTarget { + KEY = 0; + VERSION = 1; + CREATE = 2; + MOD = 3; + VALUE = 4; + } + + // key is the first key for the range. If range_end is not given, the request only looks up key. + bytes key = 1; + // range_end is the upper bound on the requested range [key, range_end). + // If range_end is '\0', the range is all keys >= key. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. + bytes range_end = 2; + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. + int64 limit = 3; + // revision is the point-in-time of the key-value store to use for the range. + // If revision is less or equal to zero, the range is over the newest key-value store. + // If the revision has been compacted, ErrCompacted is returned as a response. + int64 revision = 4; + + // sort_order is the order for returned sorted results. + SortOrder sort_order = 5; + + // sort_target is the key-value field to use for sorting. + SortTarget sort_target = 6; + + // serializable sets the range request to use serializable member-local reads. + // Range requests are linearizable by default; linearizable requests have higher + // latency and lower throughput than serializable requests but reflect the current + // consensus of the cluster. 
For better performance, in exchange for possible stale reads, + // a serializable range request is served locally without needing to reach consensus + // with other nodes in the cluster. + bool serializable = 7; + + // keys_only when set returns only the keys and not the values. + bool keys_only = 8; + + // count_only when set returns only the count of the keys in the range. + bool count_only = 9; + + // min_mod_revision is the lower bound for returned key mod revisions; all keys with + // lesser mod revisions will be filtered away. + int64 min_mod_revision = 10; + + // max_mod_revision is the upper bound for returned key mod revisions; all keys with + // greater mod revisions will be filtered away. + int64 max_mod_revision = 11; + + // min_create_revision is the lower bound for returned key create revisions; all keys with + // lesser create revisions will be filtered away. + int64 min_create_revision = 12; + + // max_create_revision is the upper bound for returned key create revisions; all keys with + // greater create revisions will be filtered away. + int64 max_create_revision = 13; +} + +message RangeResponse { + ResponseHeader header = 1; + // kvs is the list of key-value pairs matched by the range request. + // kvs is empty when count is requested. + repeated mvccpb.KeyValue kvs = 2; + // more indicates if there are more keys to return in the requested range. + bool more = 3; + // count is set to the number of keys within the range when requested. + int64 count = 4; +} + +message PutRequest { + // key is the key, in bytes, to put into the key-value store. + bytes key = 1; + // value is the value, in bytes, to associate with the key in the key-value store. + bytes value = 2; + // lease is the lease ID to associate with the key in the key-value store. A lease + // value of 0 indicates no lease. + int64 lease = 3; + + // If prev_kv is set, etcd gets the previous key-value pair before changing it. + // The previous key-value pair will be returned in the put response. + bool prev_kv = 4; + + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + bool ignore_value = 5; + + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + bool ignore_lease = 6; +} + +message PutResponse { + ResponseHeader header = 1; + // if prev_kv is set in the request, the previous key-value pair will be returned. + mvccpb.KeyValue prev_kv = 2; +} + +message DeleteRangeRequest { + // key is the first key to delete in the range. + bytes key = 1; + // range_end is the key following the last key to delete for the range [key, range_end). + // If range_end is not given, the range is defined to contain only the key argument. + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). + // If range_end is '\0', the range is all keys greater than or equal to the key argument. + bytes range_end = 2; + + // If prev_kv is set, etcd gets the previous key-value pairs before deleting them. + // The previous key-value pairs will be returned in the delete response. + bool prev_kv = 3; +} + +message DeleteRangeResponse { + ResponseHeader header = 1; + // deleted is the number of keys deleted by the delete range request. + int64 deleted = 2; + // if prev_kv is set in the request, the previous key-value pairs will be returned.
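The serializable flag is the one RangeRequest knob that changes read semantics; in the vendored clientv3 it surfaces as an option on Get, as do keys_only and count_only. A sketch reusing cli and ctx from the first example:

// Default read: linearizable, answered through cluster consensus.
lin, err := cli.Get(ctx, "foo")
if err != nil {
	panic(err)
}
// Serializable read: served from the contacted member's local store; lower
// latency, but possibly stale relative to the cluster consensus.
ser, err := cli.Get(ctx, "foo", clientv3.WithSerializable())
if err != nil {
	panic(err)
}
// count_only surfaces as WithCountOnly; the answer comes back in Count.
cnt, err := cli.Get(ctx, "jobs/", clientv3.WithPrefix(), clientv3.WithCountOnly())
if err != nil {
	panic(err)
}
fmt.Println(len(lin.Kvs), len(ser.Kvs), cnt.Count)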
+ repeated mvccpb.KeyValue prev_kvs = 3; +} + +message RequestOp { + // request is a union of request types accepted by a transaction. + oneof request { + RangeRequest request_range = 1; + PutRequest request_put = 2; + DeleteRangeRequest request_delete_range = 3; + } +} + +message ResponseOp { + // response is a union of response types returned by a transaction. + oneof response { + RangeResponse response_range = 1; + PutResponse response_put = 2; + DeleteRangeResponse response_delete_range = 3; + } +} + +message Compare { + enum CompareResult { + EQUAL = 0; + GREATER = 1; + LESS = 2; + NOT_EQUAL = 3; + } + enum CompareTarget { + VERSION = 0; + CREATE = 1; + MOD = 2; + VALUE= 3; + } + // result is the logical comparison operation for this comparison. + CompareResult result = 1; + // target is the key-value field to inspect for the comparison. + CompareTarget target = 2; + // key is the subject key for the comparison operation. + bytes key = 3; + oneof target_union { + // version is the version of the given key + int64 version = 4; + // create_revision is the creation revision of the given key + int64 create_revision = 5; + // mod_revision is the last modified revision of the given key. + int64 mod_revision = 6; + // value is the value of the given key, in bytes. + bytes value = 7; + } +} + +// From google paxosdb paper: +// Our implementation hinges around a powerful primitive which we call MultiOp. All other database +// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically +// and consists of three components: +// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check +// for the absence or presence of a value, or compare with a given value. Two different tests in the guard +// may apply to the same or different entries in the database. All tests in the guard are applied and +// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise +// it executes f op (see item 3 below). +// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or +// lookup operation, and applies to a single database entry. Two different operations in the list may apply +// to the same or different entries in the database. These operations are executed +// if guard evaluates to +// true. +// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. +message TxnRequest { + // compare is a list of predicates representing a conjunction of terms. + // If the comparisons succeed, then the success requests will be processed in order, + // and the response will contain their respective responses in order. + // If the comparisons fail, then the failure requests will be processed in order, + // and the response will contain their respective responses in order. + repeated Compare compare = 1; + // success is a list of requests which will be applied when compare evaluates to true. + repeated RequestOp success = 2; + // failure is a list of requests which will be applied when compare evaluates to false. + repeated RequestOp failure = 3; +} + +message TxnResponse { + ResponseHeader header = 1; + // succeeded is set to true if the compare evaluated to true, and false otherwise. + bool succeeded = 2; + // responses is a list of responses corresponding to the results from applying + // success if succeeded is true or failure if succeeded is false.
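The guard/success/failure shape of TxnRequest is exposed in clientv3 as an If/Then/Else builder. A sketch of the common "create only if absent" guard, reusing cli and ctx from the first example (the key and value are illustrative):

// create_revision == 0 means the key has never been written, so this txn
// creates "foo" if it is absent and reads it back otherwise.
txnResp, err := cli.Txn(ctx).
	If(clientv3.Compare(clientv3.CreateRevision("foo"), "=", 0)).
	Then(clientv3.OpPut("foo", "bar")).
	Else(clientv3.OpGet("foo")).
	Commit()
if err != nil {
	panic(err)
}
fmt.Println("guard held:", txnResp.Succeeded)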
+ repeated ResponseOp responses = 3; +} + +// CompactionRequest compacts the key-value store up to a given revision. All superseded keys +// with a revision less than the compaction revision will be removed. +message CompactionRequest { + // revision is the key-value store revision for the compaction operation. + int64 revision = 1; + // physical is set so the RPC will wait until the compaction is physically + // applied to the local database such that compacted entries are totally + // removed from the backend database. + bool physical = 2; +} + +message CompactionResponse { + ResponseHeader header = 1; +} + +message HashRequest { +} + +message HashResponse { + ResponseHeader header = 1; + // hash is the hash value computed from the responding member's key-value store. + uint32 hash = 2; +} + +message SnapshotRequest { +} + +message SnapshotResponse { + // header has the current key-value store information. The first header in the snapshot + // stream indicates the point in time of the snapshot. + ResponseHeader header = 1; + + // remaining_bytes is the number of blob bytes to be sent after this message + uint64 remaining_bytes = 2; + + // blob contains the next chunk of the snapshot in the snapshot stream. + bytes blob = 3; +} + +message WatchRequest { + // request_union is a request to either create a new watcher or cancel an existing watcher. + oneof request_union { + WatchCreateRequest create_request = 1; + WatchCancelRequest cancel_request = 2; + } +} + +message WatchCreateRequest { + // key is the key to register for watching. + bytes key = 1; + // range_end is the end of the range [key, range_end) to watch. If range_end is not given, + // only the key argument is watched. If range_end is equal to '\0', all keys greater than + // or equal to the key argument are watched. + // If the range_end is one bit larger than the given key, + // then all keys with the prefix (the given key) will be watched. + bytes range_end = 2; + // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". + int64 start_revision = 3; + // progress_notify is set so that the etcd server will periodically send a WatchResponse with + // no events to the new watcher if there are no recent events. It is useful when clients + // wish to recover a disconnected watcher starting from a recent known revision. + // The etcd server may decide how often it will send notifications based on current load. + bool progress_notify = 4; + + enum FilterType { + // filter out put event. + NOPUT = 0; + // filter out delete event. + NODELETE = 1; + } + // filters filter the events at server side before it sends back to the watcher. + repeated FilterType filters = 5; + + // If prev_kv is set, created watcher gets the previous KV before the event happens. + // If the previous KV is already compacted, nothing will be returned. + bool prev_kv = 6; +} + +message WatchCancelRequest { + // watch_id is the watcher id to cancel so that no more events are transmitted. + int64 watch_id = 1; +} + +message WatchResponse { + ResponseHeader header = 1; + // watch_id is the ID of the watcher that corresponds to the response. + int64 watch_id = 2; + // created is set to true if the response is for a create watch request. + // The client should record the watch_id and expect to receive events for + // the created watcher from the same stream. + // All events sent to the created watcher will attach with the same watch_id. + bool created = 3; + // canceled is set to true if the response is for a cancel watch request. 
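WatchCreateRequest's key ranges and start revisions surface as options on clientv3's Watch, which multiplexes watchers over one stream as described above. A sketch reusing cli from the first example (the prefix is illustrative):

// Stream events for all keys under "jobs/" from "now" onward; each
// WatchResponse batches events that share a revision.
rch := cli.Watch(context.Background(), "jobs/", clientv3.WithPrefix())
for wresp := range rch {
	if wresp.Canceled {
		break // e.g. the start revision was already compacted
	}
	for _, ev := range wresp.Events {
		fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
	}
}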
+ // No further events will be sent to the canceled watcher. + bool canceled = 4; + // compact_revision is set to the minimum index if a watcher tries to watch + // at a compacted index. + // + // This happens when creating a watcher at a compacted revision or the watcher cannot + // catch up with the progress of the key-value store. + // + // The client should treat the watcher as canceled and should not try to create any + // watcher with the same start_revision again. + int64 compact_revision = 5; + + // cancel_reason indicates the reason for canceling the watcher. + string cancel_reason = 6; + + repeated mvccpb.Event events = 11; +} + +message LeaseGrantRequest { + // TTL is the advisory time-to-live in seconds. + int64 TTL = 1; + // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. + int64 ID = 2; +} + +message LeaseGrantResponse { + ResponseHeader header = 1; + // ID is the lease ID for the granted lease. + int64 ID = 2; + // TTL is the server chosen lease time-to-live in seconds. + int64 TTL = 3; + string error = 4; +} + +message LeaseRevokeRequest { + // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. + int64 ID = 1; +} + +message LeaseRevokeResponse { + ResponseHeader header = 1; +} + +message LeaseKeepAliveRequest { + // ID is the lease ID for the lease to keep alive. + int64 ID = 1; +} + +message LeaseKeepAliveResponse { + ResponseHeader header = 1; + // ID is the lease ID from the keep alive request. + int64 ID = 2; + // TTL is the new time-to-live for the lease. + int64 TTL = 3; +} + +message LeaseTimeToLiveRequest { + // ID is the lease ID for the lease. + int64 ID = 1; + // keys is true to query all the keys attached to this lease. + bool keys = 2; +} + +message LeaseTimeToLiveResponse { + ResponseHeader header = 1; + // ID is the lease ID from the keep alive request. + int64 ID = 2; + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + int64 TTL = 3; + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + int64 grantedTTL = 4; + // Keys is the list of keys attached to this lease. + repeated bytes keys = 5; +} + +message Member { + // ID is the member ID for this member. + uint64 ID = 1; + // name is the human-readable name of the member. If the member is not started, the name will be an empty string. + string name = 2; + // peerURLs is the list of URLs the member exposes to the cluster for communication. + repeated string peerURLs = 3; + // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. + repeated string clientURLs = 4; +} + +message MemberAddRequest { + // peerURLs is the list of URLs the added member will use to communicate with the cluster. + repeated string peerURLs = 1; +} + +message MemberAddResponse { + ResponseHeader header = 1; + // member is the member information for the added member. + Member member = 2; + // members is a list of all members after adding the new member. + repeated Member members = 3; +} + +message MemberRemoveRequest { + // ID is the member ID of the member to remove. + uint64 ID = 1; +} + +message MemberRemoveResponse { + ResponseHeader header = 1; + // members is a list of all members after removing the member. + repeated Member members = 2; +} + +message MemberUpdateRequest { + // ID is the member ID of the member to update. 
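The lease messages pair up with Grant, KeepAlive, and TimeToLive in the vendored clientv3; the usual pattern of pinning a key's lifetime to a lease looks roughly like this, reusing cli and ctx from the first example (the key name is illustrative):

// Grant a 10-second lease and attach a key to it; the key is deleted when
// the lease expires unless keep-alives keep arriving.
lresp, err := cli.Grant(ctx, 10)
if err != nil {
	panic(err)
}
if _, err := cli.Put(ctx, "locks/worker-1", "alive", clientv3.WithLease(lresp.ID)); err != nil {
	panic(err)
}
// KeepAlive sends LeaseKeepAliveRequests in the background for as long as
// the context lives and the channel is drained.
kach, err := cli.KeepAlive(context.Background(), lresp.ID)
if err != nil {
	panic(err)
}
go func() {
	for range kach {
		// each response confirms a refreshed TTL
	}
}()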
+ uint64 ID = 1; + // peerURLs is the new list of URLs the member will use to communicate with the cluster. + repeated string peerURLs = 2; +} + +message MemberUpdateResponse{ + ResponseHeader header = 1; + // members is a list of all members after updating the member. + repeated Member members = 2; +} + +message MemberListRequest { +} + +message MemberListResponse { + ResponseHeader header = 1; + // members is a list of all members associated with the cluster. + repeated Member members = 2; +} + +message DefragmentRequest { +} + +message DefragmentResponse { + ResponseHeader header = 1; +} + +enum AlarmType { + NONE = 0; // default, used to query if any alarm is active + NOSPACE = 1; // space quota is exhausted +} + +message AlarmRequest { + enum AlarmAction { + GET = 0; + ACTIVATE = 1; + DEACTIVATE = 2; + } + // action is the kind of alarm request to issue. The action + // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a + // raised alarm. + AlarmAction action = 1; + // memberID is the ID of the member associated with the alarm. If memberID is 0, the + // alarm request covers all members. + uint64 memberID = 2; + // alarm is the type of alarm to consider for this request. + AlarmType alarm = 3; +} + +message AlarmMember { + // memberID is the ID of the member associated with the raised alarm. + uint64 memberID = 1; + // alarm is the type of alarm which has been raised. + AlarmType alarm = 2; +} + +message AlarmResponse { + ResponseHeader header = 1; + // alarms is a list of alarms associated with the alarm request. + repeated AlarmMember alarms = 2; +} + +message StatusRequest { +} + +message StatusResponse { + ResponseHeader header = 1; + // version is the cluster protocol version used by the responding member. + string version = 2; + // dbSize is the size of the backend database, in bytes, of the responding member. + int64 dbSize = 3; + // leader is the member ID which the responding member believes is the current leader. + uint64 leader = 4; + // raftIndex is the current raft index of the responding member. + uint64 raftIndex = 5; + // raftTerm is the current raft term of the responding member. + uint64 raftTerm = 6; +} + +message AuthEnableRequest { +} + +message AuthDisableRequest { +} + +message AuthenticateRequest { + string name = 1; + string password = 2; +} + +message AuthUserAddRequest { + string name = 1; + string password = 2; +} + +message AuthUserGetRequest { + string name = 1; +} + +message AuthUserDeleteRequest { + // name is the name of the user to delete. + string name = 1; +} + +message AuthUserChangePasswordRequest { + // name is the name of the user whose password is being changed. + string name = 1; + // password is the new password for the user. + string password = 2; +} + +message AuthUserGrantRoleRequest { + // user is the name of the user which should be granted a given role. + string user = 1; + // role is the name of the role to grant to the user. + string role = 2; +} + +message AuthUserRevokeRoleRequest { + string name = 1; + string role = 2; +} + +message AuthRoleAddRequest { + // name is the name of the role to add to the authentication system. + string name = 1; +} + +message AuthRoleGetRequest { + string role = 1; +} + +message AuthUserListRequest { +} + +message AuthRoleListRequest { +} + +message AuthRoleDeleteRequest { + string role = 1; +} + +message AuthRoleGrantPermissionRequest { + // name is the name of the role which will be granted the permission. + string name = 1; + // perm is the permission to grant to the role. 
+ authpb.Permission perm = 2; +} + +message AuthRoleRevokePermissionRequest { + string role = 1; + string key = 2; + string range_end = 3; +} + +message AuthEnableResponse { + ResponseHeader header = 1; +} + +message AuthDisableResponse { + ResponseHeader header = 1; +} + +message AuthenticateResponse { + ResponseHeader header = 1; + // token is an authorized token that can be used in succeeding RPCs + string token = 2; +} + +message AuthUserAddResponse { + ResponseHeader header = 1; +} + +message AuthUserGetResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserDeleteResponse { + ResponseHeader header = 1; +} + +message AuthUserChangePasswordResponse { + ResponseHeader header = 1; +} + +message AuthUserGrantRoleResponse { + ResponseHeader header = 1; +} + +message AuthUserRevokeRoleResponse { + ResponseHeader header = 1; +} + +message AuthRoleAddResponse { + ResponseHeader header = 1; +} + +message AuthRoleGetResponse { + ResponseHeader header = 1; + + repeated authpb.Permission perm = 2; +} + +message AuthRoleListResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserListResponse { + ResponseHeader header = 1; + + repeated string users = 2; +} + +message AuthRoleDeleteResponse { + ResponseHeader header = 1; +} + +message AuthRoleGrantPermissionResponse { + ResponseHeader header = 1; +} + +message AuthRoleRevokePermissionResponse { + ResponseHeader header = 1; +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD b/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD new file mode 100644 index 000000000000..473575baffcb --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "cluster.go", + "doc.go", + "errors.go", + "member.go", + "store.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/membership", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/error:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/etcd/raft:go_default_library", + "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", + "//vendor/github.com/coreos/etcd/store:go_default_library", + "//vendor/github.com/coreos/etcd/version:go_default_library", + "//vendor/github.com/coreos/go-semver/semver:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go new file mode 100644 index 000000000000..2330219f18ae --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go @@ -0,0 +1,512 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package membership + +import ( + "bytes" + "crypto/sha1" + "encoding/binary" + "encoding/json" + "fmt" + "path" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/netutil" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/store" + "github.com/coreos/etcd/version" + "github.com/coreos/go-semver/semver" +) + +// RaftCluster is a list of Members that belong to the same raft cluster +type RaftCluster struct { + id types.ID + token string + + store store.Store + be backend.Backend + + sync.Mutex // guards the fields below + version *semver.Version + members map[types.ID]*Member + // removed contains the ids of removed members in the cluster. + // removed id cannot be reused. + removed map[types.ID]bool +} + +func NewClusterFromURLsMap(token string, urlsmap types.URLsMap) (*RaftCluster, error) { + c := NewCluster(token) + for name, urls := range urlsmap { + m := NewMember(name, urls, token, nil) + if _, ok := c.members[m.ID]; ok { + return nil, fmt.Errorf("member exists with identical ID %v", m) + } + if uint64(m.ID) == raft.None { + return nil, fmt.Errorf("cannot use %x as member id", raft.None) + } + c.members[m.ID] = m + } + c.genID() + return c, nil +} + +func NewClusterFromMembers(token string, id types.ID, membs []*Member) *RaftCluster { + c := NewCluster(token) + c.id = id + for _, m := range membs { + c.members[m.ID] = m + } + return c +} + +func NewCluster(token string) *RaftCluster { + return &RaftCluster{ + token: token, + members: make(map[types.ID]*Member), + removed: make(map[types.ID]bool), + } +} + +func (c *RaftCluster) ID() types.ID { return c.id } + +func (c *RaftCluster) Members() []*Member { + c.Lock() + defer c.Unlock() + var ms MembersByID + for _, m := range c.members { + ms = append(ms, m.Clone()) + } + sort.Sort(ms) + return []*Member(ms) +} + +func (c *RaftCluster) Member(id types.ID) *Member { + c.Lock() + defer c.Unlock() + return c.members[id].Clone() +} + +// MemberByName returns a Member with the given name if exists. +// If more than one member has the given name, it will panic. +func (c *RaftCluster) MemberByName(name string) *Member { + c.Lock() + defer c.Unlock() + var memb *Member + for _, m := range c.members { + if m.Name == name { + if memb != nil { + plog.Panicf("two members with the given name %q exist", name) + } + memb = m + } + } + return memb.Clone() +} + +func (c *RaftCluster) MemberIDs() []types.ID { + c.Lock() + defer c.Unlock() + var ids []types.ID + for _, m := range c.members { + ids = append(ids, m.ID) + } + sort.Sort(types.IDSlice(ids)) + return ids +} + +func (c *RaftCluster) IsIDRemoved(id types.ID) bool { + c.Lock() + defer c.Unlock() + return c.removed[id] +} + +// PeerURLs returns a list of all peer addresses. +// The returned list is sorted in ascending lexicographical order. 
+func (c *RaftCluster) PeerURLs() []string { + c.Lock() + defer c.Unlock() + urls := make([]string, 0) + for _, p := range c.members { + urls = append(urls, p.PeerURLs...) + } + sort.Strings(urls) + return urls +} + +// ClientURLs returns a list of all client addresses. +// The returned list is sorted in ascending lexicographical order. +func (c *RaftCluster) ClientURLs() []string { + c.Lock() + defer c.Unlock() + urls := make([]string, 0) + for _, p := range c.members { + urls = append(urls, p.ClientURLs...) + } + sort.Strings(urls) + return urls +} + +func (c *RaftCluster) String() string { + c.Lock() + defer c.Unlock() + b := &bytes.Buffer{} + fmt.Fprintf(b, "{ClusterID:%s ", c.id) + var ms []string + for _, m := range c.members { + ms = append(ms, fmt.Sprintf("%+v", m)) + } + fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " ")) + var ids []string + for id := range c.removed { + ids = append(ids, id.String()) + } + fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " ")) + return b.String() +} + +func (c *RaftCluster) genID() { + mIDs := c.MemberIDs() + b := make([]byte, 8*len(mIDs)) + for i, id := range mIDs { + binary.BigEndian.PutUint64(b[8*i:], uint64(id)) + } + hash := sha1.Sum(b) + c.id = types.ID(binary.BigEndian.Uint64(hash[:8])) +} + +func (c *RaftCluster) SetID(id types.ID) { c.id = id } + +func (c *RaftCluster) SetStore(st store.Store) { c.store = st } + +func (c *RaftCluster) SetBackend(be backend.Backend) { + c.be = be + mustCreateBackendBuckets(c.be) +} + +func (c *RaftCluster) Recover(onSet func(*semver.Version)) { + c.Lock() + defer c.Unlock() + + c.members, c.removed = membersFromStore(c.store) + c.version = clusterVersionFromStore(c.store) + mustDetectDowngrade(c.version) + onSet(c.version) + + for _, m := range c.members { + plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id) + } + if c.version != nil { + plog.Infof("set the cluster version to %v from store", version.Cluster(c.version.String())) + } +} + +// ValidateConfigurationChange takes a proposed ConfChange and +// ensures that it is still valid. +func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { + members, removed := membersFromStore(c.store) + id := types.ID(cc.NodeID) + if removed[id] { + return ErrIDRemoved + } + switch cc.Type { + case raftpb.ConfChangeAddNode: + if members[id] != nil { + return ErrIDExists + } + urls := make(map[string]bool) + for _, m := range members { + for _, u := range m.PeerURLs { + urls[u] = true + } + } + m := new(Member) + if err := json.Unmarshal(cc.Context, m); err != nil { + plog.Panicf("unmarshal member should never fail: %v", err) + } + for _, u := range m.PeerURLs { + if urls[u] { + return ErrPeerURLexists + } + } + case raftpb.ConfChangeRemoveNode: + if members[id] == nil { + return ErrIDNotFound + } + case raftpb.ConfChangeUpdateNode: + if members[id] == nil { + return ErrIDNotFound + } + urls := make(map[string]bool) + for _, m := range members { + if m.ID == id { + continue + } + for _, u := range m.PeerURLs { + urls[u] = true + } + } + m := new(Member) + if err := json.Unmarshal(cc.Context, m); err != nil { + plog.Panicf("unmarshal member should never fail: %v", err) + } + for _, u := range m.PeerURLs { + if urls[u] { + return ErrPeerURLexists + } + } + default: + plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode") + } + return nil +} + +// AddMember adds a new Member into the cluster, and saves the given member's +// raftAttributes into the store. 
The given member should have empty attributes. +// A Member with a matching id must not exist. +func (c *RaftCluster) AddMember(m *Member) { + c.Lock() + defer c.Unlock() + if c.store != nil { + mustSaveMemberToStore(c.store, m) + } + if c.be != nil { + mustSaveMemberToBackend(c.be, m) + } + + c.members[m.ID] = m + + plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.id) +} + +// RemoveMember removes a member from the store. +// The given id MUST exist, or the function panics. +func (c *RaftCluster) RemoveMember(id types.ID) { + c.Lock() + defer c.Unlock() + if c.store != nil { + mustDeleteMemberFromStore(c.store, id) + } + if c.be != nil { + mustDeleteMemberFromBackend(c.be, id) + } + + delete(c.members, id) + c.removed[id] = true + + plog.Infof("removed member %s from cluster %s", id, c.id) +} + +func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) { + c.Lock() + defer c.Unlock() + if m, ok := c.members[id]; ok { + m.Attributes = attr + if c.store != nil { + mustUpdateMemberAttrInStore(c.store, m) + } + if c.be != nil { + mustSaveMemberToBackend(c.be, m) + } + return + } + _, ok := c.removed[id] + if !ok { + plog.Panicf("error updating attributes of unknown member %s", id) + } + plog.Warningf("skipped updating attributes of removed member %s", id) +} + +func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) { + c.Lock() + defer c.Unlock() + + c.members[id].RaftAttributes = raftAttr + if c.store != nil { + mustUpdateMemberInStore(c.store, c.members[id]) + } + if c.be != nil { + mustSaveMemberToBackend(c.be, c.members[id]) + } + + plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.id) +} + +func (c *RaftCluster) Version() *semver.Version { + c.Lock() + defer c.Unlock() + if c.version == nil { + return nil + } + return semver.Must(semver.NewVersion(c.version.String())) +} + +func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*semver.Version)) { + c.Lock() + defer c.Unlock() + if c.version != nil { + plog.Noticef("updated the cluster version from %v to %v", version.Cluster(c.version.String()), version.Cluster(ver.String())) + } else { + plog.Noticef("set the initial cluster version to %v", version.Cluster(ver.String())) + } + c.version = ver + mustDetectDowngrade(c.version) + if c.store != nil { + mustSaveClusterVersionToStore(c.store, ver) + } + if c.be != nil { + mustSaveClusterVersionToBackend(c.be, ver) + } + onSet(ver) +} + +func (c *RaftCluster) IsReadyToAddNewMember() bool { + nmembers := 1 + nstarted := 0 + + for _, member := range c.members { + if member.IsStarted() { + nstarted++ + } + nmembers++ + } + + if nstarted == 1 && nmembers == 2 { + // a case of adding a new node to 1-member cluster for restoring cluster data + // https://github.com/coreos/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster + + plog.Debugf("The number of started member is 1. 
This cluster can accept add member request.") + return true + } + + nquorum := nmembers/2 + 1 + if nstarted < nquorum { + plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum) + return false + } + + return true +} + +func (c *RaftCluster) IsReadyToRemoveMember(id uint64) bool { + nmembers := 0 + nstarted := 0 + + for _, member := range c.members { + if uint64(member.ID) == id { + continue + } + + if member.IsStarted() { + nstarted++ + } + nmembers++ + } + + nquorum := nmembers/2 + 1 + if nstarted < nquorum { + plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum) + return false + } + + return true +} + +func membersFromStore(st store.Store) (map[types.ID]*Member, map[types.ID]bool) { + members := make(map[types.ID]*Member) + removed := make(map[types.ID]bool) + e, err := st.Get(StoreMembersPrefix, true, true) + if err != nil { + if isKeyNotFound(err) { + return members, removed + } + plog.Panicf("get storeMembers should never fail: %v", err) + } + for _, n := range e.Node.Nodes { + var m *Member + m, err = nodeToMember(n) + if err != nil { + plog.Panicf("nodeToMember should never fail: %v", err) + } + members[m.ID] = m + } + + e, err = st.Get(storeRemovedMembersPrefix, true, true) + if err != nil { + if isKeyNotFound(err) { + return members, removed + } + plog.Panicf("get storeRemovedMembers should never fail: %v", err) + } + for _, n := range e.Node.Nodes { + removed[MustParseMemberIDFromKey(n.Key)] = true + } + return members, removed +} + +func clusterVersionFromStore(st store.Store) *semver.Version { + e, err := st.Get(path.Join(storePrefix, "version"), false, false) + if err != nil { + if isKeyNotFound(err) { + return nil + } + plog.Panicf("unexpected error (%v) when getting cluster version from store", err) + } + return semver.Must(semver.NewVersion(*e.Node.Value)) +} + +// ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs +// with the existing cluster. If the validation succeeds, it assigns the IDs +// from the existing cluster to the local cluster. +// If the validation fails, an error will be returned. 
+func ValidateClusterAndAssignIDs(local *RaftCluster, existing *RaftCluster) error { + ems := existing.Members() + lms := local.Members() + if len(ems) != len(lms) { + return fmt.Errorf("member count is unequal") + } + sort.Sort(MembersByPeerURLs(ems)) + sort.Sort(MembersByPeerURLs(lms)) + + ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer cancel() + for i := range ems { + if !netutil.URLStringsEqual(ctx, ems[i].PeerURLs, lms[i].PeerURLs) { + return fmt.Errorf("unmatched member while checking PeerURLs") + } + lms[i].ID = ems[i].ID + } + local.members = make(map[types.ID]*Member) + for _, m := range lms { + local.members[m.ID] = m + } + return nil +} + +func mustDetectDowngrade(cv *semver.Version) { + lv := semver.Must(semver.NewVersion(version.Version)) + // only keep major.minor version for comparison against cluster version + lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} + if cv != nil && lv.LessThan(*cv) { + plog.Fatalf("cluster cannot be downgraded (current version: %s is lower than determined cluster version: %s).", version.Version, version.Cluster(cv.String())) + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go new file mode 100644 index 000000000000..b07fb2d92859 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package membership describes individual etcd members and clusters of members. +package membership diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go b/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go new file mode 100644 index 000000000000..e4d36af25477 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go @@ -0,0 +1,33 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
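An aside on the quorum arithmetic in IsReadyToAddNewMember and IsReadyToRemoveMember above: both compute nquorum := nmembers/2 + 1 and reject a reconfiguration that would leave fewer started members than that. A minimal, self-contained sketch (the member counts are illustrative, not from the source):

package main

import "fmt"

func main() {
	// adding a fourth member to a 3-member cluster where all 3 are started;
	// the prospective member is counted first, as in IsReadyToAddNewMember
	nmembers, nstarted := 1, 0
	for _, started := range []bool{true, true, true} {
		if started {
			nstarted++
		}
		nmembers++
	}
	nquorum := nmembers/2 + 1
	fmt.Printf("started=%d quorum=%d accept=%v\n", nstarted, nquorum, nstarted >= nquorum)
}

The same arithmetic explains the remove path: removing a started member from a 3-member cluster in which only two members are started is rejected, because the survivors would be 1 started out of 2, below the new quorum of 2.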
+ +package membership + +import ( + "errors" + + etcdErr "github.com/coreos/etcd/error" +) + +var ( + ErrIDRemoved = errors.New("membership: ID removed") + ErrIDExists = errors.New("membership: ID exists") + ErrIDNotFound = errors.New("membership: ID not found") + ErrPeerURLexists = errors.New("membership: peerURL exists") +) + +func isKeyNotFound(err error) bool { + e, ok := err.(*etcdErr.Error) + return ok && e.ErrorCode == etcdErr.EcodeKeyNotFound +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/member.go b/vendor/github.com/coreos/etcd/etcdserver/membership/member.go new file mode 100644 index 000000000000..6de74d26f8d4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/member.go @@ -0,0 +1,124 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package membership + +import ( + "crypto/sha1" + "encoding/binary" + "fmt" + "math/rand" + "sort" + "time" + + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/pkg/capnslog" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/membership") +) + +// RaftAttributes represents the raft related attributes of an etcd member. +type RaftAttributes struct { + // PeerURLs is the list of peers in the raft cluster. + // TODO(philips): ensure these are URLs + PeerURLs []string `json:"peerURLs"` +} + +// Attributes represents all the non-raft related attributes of an etcd member. +type Attributes struct { + Name string `json:"name,omitempty"` + ClientURLs []string `json:"clientURLs,omitempty"` +} + +type Member struct { + ID types.ID `json:"id"` + RaftAttributes + Attributes +} + +// NewMember creates a Member without an ID and generates one based on the +// cluster name, peer URLs, and time. This is used for bootstrapping or adding a new member. +func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member { + m := &Member{ + RaftAttributes: RaftAttributes{PeerURLs: peerURLs.StringSlice()}, + Attributes: Attributes{Name: name}, + } + + var b []byte + sort.Strings(m.PeerURLs) + for _, p := range m.PeerURLs { + b = append(b, []byte(p)...) + } + + b = append(b, []byte(clusterName)...) + if now != nil { + b = append(b, []byte(fmt.Sprintf("%d", now.Unix()))...) + } + + hash := sha1.Sum(b) + m.ID = types.ID(binary.BigEndian.Uint64(hash[:8])) + return m +} + +// PickPeerURL chooses a random address from a given Member's PeerURLs. +// It panics if the Member has no PeerURLs.
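+// For example, with PeerURLs = ["http://10.0.1.10:2380", "http://10.0.1.10:7001"] +// (illustrative URLs), each call returns one of the two URLs uniformly at random.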
+func (m *Member) PickPeerURL() string { + if len(m.PeerURLs) == 0 { + plog.Panicf("member should always have some peer url") + } + return m.PeerURLs[rand.Intn(len(m.PeerURLs))] +} + +func (m *Member) Clone() *Member { + if m == nil { + return nil + } + mm := &Member{ + ID: m.ID, + Attributes: Attributes{ + Name: m.Name, + }, + } + if m.PeerURLs != nil { + mm.PeerURLs = make([]string, len(m.PeerURLs)) + copy(mm.PeerURLs, m.PeerURLs) + } + if m.ClientURLs != nil { + mm.ClientURLs = make([]string, len(m.ClientURLs)) + copy(mm.ClientURLs, m.ClientURLs) + } + return mm +} + +func (m *Member) IsStarted() bool { + return len(m.Name) != 0 +} + +// MembersByID implements the sort interface, ordering by member ID. +type MembersByID []*Member + +func (ms MembersByID) Len() int { return len(ms) } +func (ms MembersByID) Less(i, j int) bool { return ms[i].ID < ms[j].ID } +func (ms MembersByID) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] } + +// MembersByPeerURLs implements the sort interface, ordering by first peer URL. +type MembersByPeerURLs []*Member + +func (ms MembersByPeerURLs) Len() int { return len(ms) } +func (ms MembersByPeerURLs) Less(i, j int) bool { + return ms[i].PeerURLs[0] < ms[j].PeerURLs[0] +} +func (ms MembersByPeerURLs) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] } diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go new file mode 100644 index 000000000000..d3f8f2474a42 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go @@ -0,0 +1,193 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package membership + +import ( + "encoding/json" + "fmt" + "path" + + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/store" + + "github.com/coreos/go-semver/semver" +) + +const ( + attributesSuffix = "attributes" + raftAttributesSuffix = "raftAttributes" + + // the prefix for storing membership related information in the store provided by the store pkg.
+ storePrefix = "/0" +) + +var ( + membersBucketName = []byte("members") + membersRemovedBucketName = []byte("members_removed") + clusterBucketName = []byte("cluster") + + StoreMembersPrefix = path.Join(storePrefix, "members") + storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members") +) + +func mustSaveMemberToBackend(be backend.Backend, m *Member) { + mkey := backendMemberKey(m.ID) + mvalue, err := json.Marshal(m) + if err != nil { + plog.Panicf("marshal member should never fail: %v", err) + } + + tx := be.BatchTx() + tx.Lock() + tx.UnsafePut(membersBucketName, mkey, mvalue) + tx.Unlock() +} + +func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) { + mkey := backendMemberKey(id) + + tx := be.BatchTx() + tx.Lock() + tx.UnsafeDelete(membersBucketName, mkey) + tx.UnsafePut(membersRemovedBucketName, mkey, []byte("removed")) + tx.Unlock() +} + +func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) { + ckey := backendClusterVersionKey() + + tx := be.BatchTx() + tx.Lock() + defer tx.Unlock() + tx.UnsafePut(clusterBucketName, ckey, []byte(ver.String())) +} + +func mustSaveMemberToStore(s store.Store, m *Member) { + b, err := json.Marshal(m.RaftAttributes) + if err != nil { + plog.Panicf("marshal raftAttributes should never fail: %v", err) + } + p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) + if _, err := s.Create(p, false, string(b), false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { + plog.Panicf("create raftAttributes should never fail: %v", err) + } +} + +func mustDeleteMemberFromStore(s store.Store, id types.ID) { + if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil { + plog.Panicf("delete member should never fail: %v", err) + } + if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { + plog.Panicf("create removedMember should never fail: %v", err) + } +} + +func mustUpdateMemberInStore(s store.Store, m *Member) { + b, err := json.Marshal(m.RaftAttributes) + if err != nil { + plog.Panicf("marshal raftAttributes should never fail: %v", err) + } + p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) + if _, err := s.Update(p, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { + plog.Panicf("update raftAttributes should never fail: %v", err) + } +} + +func mustUpdateMemberAttrInStore(s store.Store, m *Member) { + b, err := json.Marshal(m.Attributes) + if err != nil { + plog.Panicf("marshal attributes should never fail: %v", err) + } + p := path.Join(MemberStoreKey(m.ID), attributesSuffix) + if _, err := s.Set(p, false, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { + plog.Panicf("update attributes should never fail: %v", err) + } +} + +func mustSaveClusterVersionToStore(s store.Store, ver *semver.Version) { + if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { + plog.Panicf("save cluster version should never fail: %v", err) + } +} + +// nodeToMember builds a member from a key-value node. +// the child nodes of the given node MUST be sorted by key.
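+// For illustration, a member node in the v2 store is laid out as follows (the +// member ID and URLs are made up): +// /0/members/8e9e05c52164694d/raftAttributes -> {"peerURLs":["http://10.0.1.10:2380"]} +// /0/members/8e9e05c52164694d/attributes -> {"name":"node1","clientURLs":["http://10.0.1.10:2379"]}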
+func nodeToMember(n *store.NodeExtern) (*Member, error) { + m := &Member{ID: MustParseMemberIDFromKey(n.Key)} + attrs := make(map[string][]byte) + raftAttrKey := path.Join(n.Key, raftAttributesSuffix) + attrKey := path.Join(n.Key, attributesSuffix) + for _, nn := range n.Nodes { + if nn.Key != raftAttrKey && nn.Key != attrKey { + return nil, fmt.Errorf("unknown key %q", nn.Key) + } + attrs[nn.Key] = []byte(*nn.Value) + } + if data := attrs[raftAttrKey]; data != nil { + if err := json.Unmarshal(data, &m.RaftAttributes); err != nil { + return nil, fmt.Errorf("unmarshal raftAttributes error: %v", err) + } + } else { + return nil, fmt.Errorf("raftAttributes key doesn't exist") + } + if data := attrs[attrKey]; data != nil { + if err := json.Unmarshal(data, &m.Attributes); err != nil { + return m, fmt.Errorf("unmarshal attributes error: %v", err) + } + } + return m, nil +} + +func backendMemberKey(id types.ID) []byte { + return []byte(id.String()) +} + +func backendClusterVersionKey() []byte { + return []byte("clusterVersion") +} + +func mustCreateBackendBuckets(be backend.Backend) { + tx := be.BatchTx() + tx.Lock() + defer tx.Unlock() + tx.UnsafeCreateBucket(membersBucketName) + tx.UnsafeCreateBucket(membersRemovedBucketName) + tx.UnsafeCreateBucket(clusterBucketName) +} + +func MemberStoreKey(id types.ID) string { + return path.Join(StoreMembersPrefix, id.String()) +} + +func StoreClusterVersionKey() string { + return path.Join(storePrefix, "version") +} + +func MemberAttributesStorePath(id types.ID) string { + return path.Join(MemberStoreKey(id), attributesSuffix) +} + +func MustParseMemberIDFromKey(key string) types.ID { + id, err := types.IDFromString(path.Base(key)) + if err != nil { + plog.Panicf("unexpected parse member id error: %v", err) + } + return id +} + +func RemovedMemberStoreKey(id types.ID) string { + return path.Join(storeRemovedMembersPrefix, id.String()) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go new file mode 100644 index 000000000000..90bbd3632a6e --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/metrics.go @@ -0,0 +1,102 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "time" + + "github.com/coreos/etcd/pkg/runtime" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "has_leader", + Help: "Whether or not a leader exists. 
1 is existence, 0 is not.", + }) + leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "leader_changes_seen_total", + Help: "The number of leader changes seen.", + }) + proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "proposals_committed_total", + Help: "The total number of consensus proposals committed.", + }) + proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "proposals_applied_total", + Help: "The total number of consensus proposals applied.", + }) + proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "proposals_pending", + Help: "The current number of pending proposals to commit.", + }) + proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "proposals_failed_total", + Help: "The total number of failed proposals seen.", + }) + leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "server", + Name: "lease_expired_total", + Help: "The total number of expired leases.", + }) +) + +func init() { + prometheus.MustRegister(hasLeader) + prometheus.MustRegister(leaderChanges) + prometheus.MustRegister(proposalsCommitted) + prometheus.MustRegister(proposalsApplied) + prometheus.MustRegister(proposalsPending) + prometheus.MustRegister(proposalsFailed) + prometheus.MustRegister(leaseExpired) +} + +func monitorFileDescriptor(done <-chan struct{}) { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + used, err := runtime.FDUsage() + if err != nil { + plog.Errorf("cannot monitor file descriptor usage (%v)", err) + return + } + limit, err := runtime.FDLimit() + if err != nil { + plog.Errorf("cannot monitor file descriptor usage (%v)", err) + return + } + if used >= limit/5*4 { + plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit) + } + select { + case <-ticker.C: + case <-done: + return + } + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go new file mode 100644 index 000000000000..87126f1564c1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/quota.go @@ -0,0 +1,121 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +) + +const ( + // DefaultQuotaBytes is the number of bytes the backend Size may + // consume before exceeding the space quota. + DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB + // MaxQuotaBytes is the maximum number of bytes suggested for a backend + // quota. A larger quota may lead to degraded performance. 
+ MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB +) + +// Quota represents an arbitrary quota against arbitrary requests. Each request +// costs some charge; if there is not enough remaining charge, then there are +// too few resources available within the quota to apply the request. +type Quota interface { + // Available judges whether the given request fits within the quota. + Available(req interface{}) bool + // Cost computes the charge against the quota for a given request. + Cost(req interface{}) int + // Remaining is the amount of charge left for the quota. + Remaining() int64 +} + +type passthroughQuota struct{} + +func (*passthroughQuota) Available(interface{}) bool { return true } +func (*passthroughQuota) Cost(interface{}) int { return 0 } +func (*passthroughQuota) Remaining() int64 { return 1 } + +type backendQuota struct { + s *EtcdServer + maxBackendBytes int64 +} + +const ( + // leaseOverhead is an estimate for the cost of storing a lease + leaseOverhead = 64 + // kvOverhead is an estimate for the cost of storing a key's metadata + kvOverhead = 256 +) + +func NewBackendQuota(s *EtcdServer) Quota { + if s.Cfg.QuotaBackendBytes < 0 { + // disable quotas if negative + plog.Warningf("disabling backend quota") + return &passthroughQuota{} + } + if s.Cfg.QuotaBackendBytes == 0 { + // use default size if no quota size given + return &backendQuota{s, DefaultQuotaBytes} + } + if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { + plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) + } + return &backendQuota{s, s.Cfg.QuotaBackendBytes} +} + +func (b *backendQuota) Available(v interface{}) bool { + // TODO: maybe optimize backend.Size() + return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes +} + +func (b *backendQuota) Cost(v interface{}) int { + switch r := v.(type) { + case *pb.PutRequest: + return costPut(r) + case *pb.TxnRequest: + return costTxn(r) + case *pb.LeaseGrantRequest: + return leaseOverhead + default: + panic("unexpected cost") + } +} + +func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) } + +func costTxnReq(u *pb.RequestOp) int { + r := u.GetRequestPut() + if r == nil { + return 0 + } + return costPut(r) +} + +func costTxn(r *pb.TxnRequest) int { + sizeSuccess := 0 + for _, u := range r.Success { + sizeSuccess += costTxnReq(u) + } + sizeFailure := 0 + for _, u := range r.Failure { + sizeFailure += costTxnReq(u) + } + if sizeFailure > sizeSuccess { + return sizeFailure + } + return sizeSuccess +} + +func (b *backendQuota) Remaining() int64 { + return b.maxBackendBytes - b.s.Backend().Size() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go new file mode 100644 index 000000000000..dcb894f82fb1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/raft.go @@ -0,0 +1,594 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
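An aside on the quota arithmetic above: costPut charges kvOverhead plus the key and value sizes, and costTxn charges whichever of the txn's two branches is more expensive, so Available() stays conservative no matter which branch executes. A minimal, self-contained sketch (the sizes are illustrative, not from the source):

package main

import "fmt"

// kvOverhead mirrors the per-key metadata estimate in quota.go above.
const kvOverhead = 256

// costPut mirrors costPut above, with the request reduced to raw sizes.
func costPut(keyLen, valueLen int) int { return kvOverhead + keyLen + valueLen }

func main() {
	// one put with a 16-byte key and a 1 KiB value
	fmt.Println(costPut(16, 1024)) // 1296

	// a txn is charged the more expensive of its success and failure branches
	success, failure := costPut(16, 1024), costPut(16, 2048)
	cost := success
	if failure > success {
		cost = failure
	}
	fmt.Println(cost) // 2320
}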
+ +package etcdserver + +import ( + "encoding/json" + "expvar" + "sort" + "sync" + "sync/atomic" + "time" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/contention" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/rafthttp" + "github.com/coreos/etcd/wal" + "github.com/coreos/etcd/wal/walpb" + "github.com/coreos/pkg/capnslog" +) + +const ( + // Number of entries for a slow follower to catch up after compacting + // the raft storage entries. + // We expect the follower to have millisecond-level latency to the leader. + // The max throughput is around 10K entries/s, so keeping 5K entries is + // enough to help a follower catch up. + numberOfCatchUpEntries = 5000 + + // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value). + // Assuming the RTT is around 10ms, 1MB max size is large enough. + maxSizePerMsg = 1 * 1024 * 1024 + // Never overflow the rafthttp buffer, which is 4096. + // TODO: a better const? + maxInflightMsgs = 4096 / 8 +) + +var ( + // protects raftStatus + raftStatusMu sync.Mutex + // indirection for expvar func interface + // expvar panics when publishing a duplicate name + // expvar does not support removing a registered name + // so only register a func that calls raftStatus + // and change raftStatus as we need. + raftStatus func() raft.Status +) + +func init() { + raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft")) + expvar.Publish("raft.status", expvar.Func(func() interface{} { + raftStatusMu.Lock() + defer raftStatusMu.Unlock() + return raftStatus() + })) +} + +type RaftTimer interface { + Index() uint64 + Term() uint64 +} + +// apply contains entries and a snapshot to be applied. Once +// an apply is consumed, the entries will be persisted to +// raft storage concurrently; the application must read +// raftDone before assuming the raft messages are stable. +type apply struct { + entries []raftpb.Entry + snapshot raftpb.Snapshot + // notifyc synchronizes etcd server applies with the raft node + notifyc chan struct{} +} + +type raftNode struct { + // Cache of the latest raft index and raft term the server has seen. + // These three uint64 fields must be the first elements to keep 64-bit + // alignment for atomic access to the fields. + index uint64 + term uint64 + lead uint64 + + raftNodeConfig + + // a chan to send/receive snapshot + msgSnapC chan raftpb.Message + + // a chan to send out apply + applyc chan apply + + // a chan to send out readState + readStateC chan raft.ReadState + + // utility + ticker *time.Ticker + // contention detectors for raft heartbeat messages + td *contention.TimeoutDetector + + stopped chan struct{} + done chan struct{} +} + +type raftNodeConfig struct { + // to check if msg receiver is removed from cluster + isIDRemoved func(id uint64) bool + raft.Node + raftStorage *raft.MemoryStorage + storage Storage + heartbeat time.Duration // for logging + // transport specifies the transport to send and receive msgs to members. + // Sending messages MUST NOT block. It is okay to drop messages, since + // clients should time out and reissue their messages. + // If transport is nil, server will panic.
+ transport rafthttp.Transporter +} + +func newRaftNode(cfg raftNodeConfig) *raftNode { + r := &raftNode{ + raftNodeConfig: cfg, + // set up contention detectors for raft heartbeat messages. + // expect to send a heartbeat within 2 heartbeat intervals. + td: contention.NewTimeoutDetector(2 * cfg.heartbeat), + readStateC: make(chan raft.ReadState, 1), + msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), + applyc: make(chan apply), + stopped: make(chan struct{}), + done: make(chan struct{}), + } + if r.heartbeat == 0 { + r.ticker = &time.Ticker{} + } else { + r.ticker = time.NewTicker(r.heartbeat) + } + return r +} + +// start prepares and starts raftNode in a new goroutine. It is no longer safe +// to modify the fields after it has been started. +func (r *raftNode) start(rh *raftReadyHandler) { + internalTimeout := time.Second + + go func() { + defer r.onStop() + islead := false + + for { + select { + case <-r.ticker.C: + r.Tick() + case rd := <-r.Ready(): + if rd.SoftState != nil { + newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead + if newLeader { + leaderChanges.Inc() + } + + if rd.SoftState.Lead == raft.None { + hasLeader.Set(0) + } else { + hasLeader.Set(1) + } + + atomic.StoreUint64(&r.lead, rd.SoftState.Lead) + islead = rd.RaftState == raft.StateLeader + rh.updateLeadership(newLeader) + r.td.Reset() + } + + if len(rd.ReadStates) != 0 { + select { + case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]: + case <-time.After(internalTimeout): + plog.Warningf("timed out sending read state") + case <-r.stopped: + return + } + } + + notifyc := make(chan struct{}, 1) + ap := apply{ + entries: rd.CommittedEntries, + snapshot: rd.Snapshot, + notifyc: notifyc, + } + + updateCommittedIndex(&ap, rh) + + select { + case r.applyc <- ap: + case <-r.stopped: + return + } + + // the leader can write to its disk in parallel with replicating to the followers and + // with the followers writing to their disks. + // For more details, check raft thesis 10.2.1 + if islead { + // gofail: var raftBeforeLeaderSend struct{} + r.transport.Send(r.processMessages(rd.Messages)) + } + + // gofail: var raftBeforeSave struct{} + if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { + plog.Fatalf("raft save state and entries error: %v", err) + } + if !raft.IsEmptyHardState(rd.HardState) { + proposalsCommitted.Set(float64(rd.HardState.Commit)) + } + // gofail: var raftAfterSave struct{} + + if !raft.IsEmptySnap(rd.Snapshot) { + // gofail: var raftBeforeSaveSnap struct{} + if err := r.storage.SaveSnap(rd.Snapshot); err != nil { + plog.Fatalf("raft save snapshot error: %v", err) + } + // etcdserver now claims the snapshot has been persisted onto the disk + notifyc <- struct{}{} + + // gofail: var raftAfterSaveSnap struct{} + r.raftStorage.ApplySnapshot(rd.Snapshot) + plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) + // gofail: var raftAfterApplySnap struct{} + } + + r.raftStorage.Append(rd.Entries) + + if !islead { + // finish processing incoming messages before we signal the raftdone chan + msgs := r.processMessages(rd.Messages) + + // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots + notifyc <- struct{}{} + + // Candidate or follower needs to wait for all pending configuration + // changes to be applied before sending messages. + // Otherwise we might incorrectly count votes (e.g. votes from removed members).
+ // Also, a slow machine's follower raft layer could proceed to become the leader + // of its own single-node cluster before the apply layer applies the config change. + // We simply wait for ALL pending entries to be applied for now. + // We might improve this later on if it causes unnecessarily long blocking issues. + waitApply := false + for _, ent := range rd.CommittedEntries { + if ent.Type == raftpb.EntryConfChange { + waitApply = true + break + } + } + if waitApply { + // blocks until 'applyAll' calls 'applyWait.Trigger' + // to be in sync with scheduled config-change job + // (assume notifyc has cap of 1) + select { + case notifyc <- struct{}{}: + case <-r.stopped: + return + } + } + + // gofail: var raftBeforeFollowerSend struct{} + r.transport.Send(msgs) + } else { + // leader already processed 'MsgSnap' and signaled + notifyc <- struct{}{} + } + + r.Advance() + case <-r.stopped: + return + } + } + }() +} + +func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { + var ci uint64 + if len(ap.entries) != 0 { + ci = ap.entries[len(ap.entries)-1].Index + } + if ap.snapshot.Metadata.Index > ci { + ci = ap.snapshot.Metadata.Index + } + if ci != 0 { + rh.updateCommittedIndex(ci) + } +} + +func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { + sentAppResp := false + for i := len(ms) - 1; i >= 0; i-- { + if r.isIDRemoved(ms[i].To) { + ms[i].To = 0 + } + + if ms[i].Type == raftpb.MsgAppResp { + if sentAppResp { + ms[i].To = 0 + } else { + sentAppResp = true + } + } + + if ms[i].Type == raftpb.MsgSnap { + // There are two separate data stores: the store for v2, and the KV for v3. + // The msgSnap only contains the most recent snapshot of the store, without the KV. + // So we need to redirect the msgSnap to the etcd server main loop for merging in the + // current store snapshot and KV snapshot. + select { + case r.msgSnapC <- ms[i]: + default: + // drop msgSnap if the inflight chan is full. + } + ms[i].To = 0 + } + if ms[i].Type == raftpb.MsgHeartbeat { + ok, exceed := r.td.Observe(ms[i].To) + if !ok { + // TODO: limit request rate. + plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed) + plog.Warningf("server is likely overloaded") + } + } + } + return ms +} + +func (r *raftNode) apply() chan apply { + return r.applyc +} + +func (r *raftNode) stop() { + r.stopped <- struct{}{} + <-r.done +} + +func (r *raftNode) onStop() { + r.Stop() + r.ticker.Stop() + r.transport.Stop() + if err := r.storage.Close(); err != nil { + plog.Panicf("raft close storage error: %v", err) + } + close(r.done) +} + +// for testing +func (r *raftNode) pauseSending() { + p := r.transport.(rafthttp.Pausable) + p.Pause() +} + +func (r *raftNode) resumeSending() { + p := r.transport.(rafthttp.Pausable) + p.Resume() +} + +// advanceTicksForElection advances ticks to the node for fast election. +// This reduces the time to wait for the first leader election when bootstrapping the whole +// cluster, while leaving at least 1 heartbeat interval for a possible existing leader +// to contact it.
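+// For example, with electionTicks = 10 the node is advanced by 9 ticks, so an +// existing leader still has one full tick in which to make contact before this +// node's election timeout can fire.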
+func advanceTicksForElection(n raft.Node, electionTicks int) { + for i := 0; i < electionTicks-1; i++ { + n.Tick() + } +} + +func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) { + var err error + member := cl.MemberByName(cfg.Name) + metadata := pbutil.MustMarshal( + &pb.Metadata{ + NodeID: uint64(member.ID), + ClusterID: uint64(cl.ID()), + }, + ) + if w, err = wal.Create(cfg.WALDir(), metadata); err != nil { + plog.Fatalf("create wal error: %v", err) + } + peers := make([]raft.Peer, len(ids)) + for i, id := range ids { + ctx, err := json.Marshal((*cl).Member(id)) + if err != nil { + plog.Panicf("marshal member should never fail: %v", err) + } + peers[i] = raft.Peer{ID: uint64(id), Context: ctx} + } + id = member.ID + plog.Infof("starting member %s in cluster %s", id, cl.ID()) + s = raft.NewMemoryStorage() + c := &raft.Config{ + ID: uint64(id), + ElectionTick: cfg.ElectionTicks, + HeartbeatTick: 1, + Storage: s, + MaxSizePerMsg: maxSizePerMsg, + MaxInflightMsgs: maxInflightMsgs, + CheckQuorum: true, + } + + n = raft.StartNode(c, peers) + raftStatusMu.Lock() + raftStatus = n.Status + raftStatusMu.Unlock() + advanceTicksForElection(n, c.ElectionTick) + return +} + +func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { + var walsnap walpb.Snapshot + if snapshot != nil { + walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term + } + w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) + + plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit) + cl := membership.NewCluster("") + cl.SetID(cid) + s := raft.NewMemoryStorage() + if snapshot != nil { + s.ApplySnapshot(*snapshot) + } + s.SetHardState(st) + s.Append(ents) + c := &raft.Config{ + ID: uint64(id), + ElectionTick: cfg.ElectionTicks, + HeartbeatTick: 1, + Storage: s, + MaxSizePerMsg: maxSizePerMsg, + MaxInflightMsgs: maxInflightMsgs, + CheckQuorum: true, + } + + n := raft.RestartNode(c) + raftStatusMu.Lock() + raftStatus = n.Status + raftStatusMu.Unlock() + advanceTicksForElection(n, c.ElectionTick) + return id, cl, n, s, w +} + +func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { + var walsnap walpb.Snapshot + if snapshot != nil { + walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term + } + w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) + + // discard the previously uncommitted entries + for i, ent := range ents { + if ent.Index > st.Commit { + plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i) + ents = ents[:i] + break + } + } + + // force append the configuration change entries + toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit) + ents = append(ents, toAppEnts...) 
+ + // force commit newly appended entries + err := w.Save(raftpb.HardState{}, toAppEnts) + if err != nil { + plog.Fatalf("%v", err) + } + if len(ents) != 0 { + st.Commit = ents[len(ents)-1].Index + } + + plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit) + cl := membership.NewCluster("") + cl.SetID(cid) + s := raft.NewMemoryStorage() + if snapshot != nil { + s.ApplySnapshot(*snapshot) + } + s.SetHardState(st) + s.Append(ents) + c := &raft.Config{ + ID: uint64(id), + ElectionTick: cfg.ElectionTicks, + HeartbeatTick: 1, + Storage: s, + MaxSizePerMsg: maxSizePerMsg, + MaxInflightMsgs: maxInflightMsgs, + } + n := raft.RestartNode(c) + raftStatus = n.Status + return id, cl, n, s, w +} + +// getIDs returns an ordered set of IDs included in the given snapshot and +// the entries. The given snapshot/entries can contain two kinds of +// ID-related entry: +// - ConfChangeAddNode, in which case the contained ID will be added into the set. +// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set. +func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { + ids := make(map[uint64]bool) + if snap != nil { + for _, id := range snap.Metadata.ConfState.Nodes { + ids[id] = true + } + } + for _, e := range ents { + if e.Type != raftpb.EntryConfChange { + continue + } + var cc raftpb.ConfChange + pbutil.MustUnmarshal(&cc, e.Data) + switch cc.Type { + case raftpb.ConfChangeAddNode: + ids[cc.NodeID] = true + case raftpb.ConfChangeRemoveNode: + delete(ids, cc.NodeID) + case raftpb.ConfChangeUpdateNode: + // do nothing + default: + plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!") + } + } + sids := make(types.Uint64Slice, 0, len(ids)) + for id := range ids { + sids = append(sids, id) + } + sort.Sort(sids) + return []uint64(sids) +} + +// createConfigChangeEnts creates a series of Raft entries (i.e. +// EntryConfChange) to remove the set of given IDs from the cluster. The ID +// `self` is _not_ removed, even if present in the set. +// If `self` is not inside the given ids, it creates a Raft entry to add a +// default member with the given `self`. 
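+// For example, given ids = {1, 2, 3}, self = 2, and index = 10 (all illustrative), +// the result is two ConfChangeRemoveNode entries, for members 1 and 3, at indexes +// 11 and 12; were self = 4 instead, remove entries for 1, 2, and 3 would be followed +// by a ConfChangeAddNode entry for 4.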
+func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry { + ents := make([]raftpb.Entry, 0) + next := index + 1 + found := false + for _, id := range ids { + if id == self { + found = true + continue + } + cc := &raftpb.ConfChange{ + Type: raftpb.ConfChangeRemoveNode, + NodeID: id, + } + e := raftpb.Entry{ + Type: raftpb.EntryConfChange, + Data: pbutil.MustMarshal(cc), + Term: term, + Index: next, + } + ents = append(ents, e) + next++ + } + if !found { + m := membership.Member{ + ID: types.ID(self), + RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}}, + } + ctx, err := json.Marshal(m) + if err != nil { + plog.Panicf("marshal member should never fail: %v", err) + } + cc := &raftpb.ConfChange{ + Type: raftpb.ConfChangeAddNode, + NodeID: self, + Context: ctx, + } + e := raftpb.Entry{ + Type: raftpb.EntryConfChange, + Data: pbutil.MustMarshal(cc), + Term: term, + Index: next, + } + ents = append(ents, e) + } + return ents +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go new file mode 100644 index 000000000000..271c5e773137 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/server.go @@ -0,0 +1,1664 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
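Before moving on to server.go, it is worth seeing how the two helpers above compose: restartAsStandaloneNode feeds the member set reported by getIDs into createConfigChangeEnts to forcibly shrink the cluster to a single member. A self-contained sketch with the raft machinery stripped away (member IDs and indexes are illustrative, not from the source):

package main

import "fmt"

func main() {
	// getIDs would report every member seen in the snapshot and WAL entries
	ids := []uint64{1, 2, 3}
	self, index := uint64(2), uint64(10)

	// createConfigChangeEnts then emits one ConfChangeRemoveNode entry per
	// member other than self, at consecutive indexes after the last commit
	next := index + 1
	for _, id := range ids {
		if id == self {
			continue
		}
		fmt.Printf("index %d: remove member %d\n", next, id)
		next++
	}
}

Appending and force-committing these entries is what leaves `self` alone in a single-node cluster after a forced restart.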
+ +package etcdserver + +import ( + "encoding/json" + "expvar" + "fmt" + "math" + "math/rand" + "net/http" + "os" + "path" + "regexp" + "sync" + "sync/atomic" + "time" + + "github.com/coreos/etcd/alarm" + "github.com/coreos/etcd/auth" + "github.com/coreos/etcd/compactor" + "github.com/coreos/etcd/discovery" + "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/etcdserver/stats" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/fileutil" + "github.com/coreos/etcd/pkg/idutil" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/pkg/runtime" + "github.com/coreos/etcd/pkg/schedule" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/pkg/wait" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/rafthttp" + "github.com/coreos/etcd/snap" + "github.com/coreos/etcd/store" + "github.com/coreos/etcd/version" + "github.com/coreos/etcd/wal" + "github.com/coreos/go-semver/semver" + "github.com/coreos/pkg/capnslog" + "golang.org/x/net/context" +) + +const ( + DefaultSnapCount = 100000 + + StoreClusterPrefix = "/0" + StoreKeysPrefix = "/1" + + // HealthInterval is the minimum time the cluster should be healthy + // before accepting add member requests. + HealthInterval = 5 * time.Second + + purgeFileInterval = 30 * time.Second + // monitorVersionInterval should be smaller than the timeout + // on the connection. Or we will not be able to reuse the connection + // (since it will timeout). + monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second + + // max number of in-flight snapshot messages etcdserver allows to have + // This number is more than enough for most clusters with 5 machines. + maxInFlightMsgSnap = 16 + + releaseDelayAfterSnapshot = 30 * time.Second + + // maxPendingRevokes is the maximum number of outstanding expired lease revocations. + maxPendingRevokes = 16 + recommendedMaxRequestBytes = 10 * 1024 * 1024 +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver") + + storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes")) +) + +func init() { + rand.Seed(time.Now().UnixNano()) + + expvar.Publish( + "file_descriptor_limit", + expvar.Func( + func() interface{} { + n, _ := runtime.FDLimit() + return n + }, + ), + ) +} + +type Response struct { + Event *store.Event + Watcher store.Watcher + err error +} + +type Server interface { + // Start performs any initialization of the Server necessary for it to + // begin serving requests. It must be called before Do or Process. + // Start must be non-blocking; any long-running server functionality + // should be implemented in goroutines. + Start() + // Stop terminates the Server and performs any necessary finalization. + // Do and Process cannot be called after Stop has been invoked. + Stop() + // ID returns the ID of the Server. + ID() types.ID + // Leader returns the ID of the leader Server. + Leader() types.ID + // Do takes a request and attempts to fulfill it, returning a Response. 
+ Do(ctx context.Context, r pb.Request) (Response, error) + // Process takes a raft message and applies it to the server's raft state + // machine, respecting any timeout of the given context. + Process(ctx context.Context, m raftpb.Message) error + // AddMember attempts to add a member into the cluster. It will return + // ErrIDRemoved if member ID is removed from the cluster, or return + // ErrIDExists if member ID exists in the cluster. + AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) + // RemoveMember attempts to remove a member from the cluster. It will + // return ErrIDRemoved if member ID is removed from the cluster, or return + // ErrIDNotFound if member ID is not in the cluster. + RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) + + // UpdateMember attempts to update an existing member in the cluster. It will + // return ErrIDNotFound if the member ID does not exist. + UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) + + // ClusterVersion is the cluster-wide minimum major.minor version. + // Cluster version is set to the min version that an etcd member is + // compatible with when it first bootstraps. + // + // ClusterVersion is nil until the cluster is bootstrapped (has a quorum). + // + // During a rolling upgrade, the ClusterVersion will be updated + // automatically after a sync (5 seconds by default). + // + // The API/raft component can utilize ClusterVersion to determine if + // it can accept a client request or a raft RPC. + // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and + // the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since + // this feature is introduced post 2.0. + ClusterVersion() *semver.Version +} + +// EtcdServer is the production implementation of the Server interface +type EtcdServer struct { + // inflightSnapshots holds the number of snapshots currently in flight. + inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned. + appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. + committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. + // consistIndex is used to hold the offset of the currently executing entry. + // It is initialized to 0 before executing any entry. + consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned. + Cfg *ServerConfig + + readych chan struct{} + r raftNode + + snapCount uint64 + + w wait.Wait + + readMu sync.RWMutex + // the read routine notifies the etcd server that it is waiting for a read by sending an empty struct to + // readwaitc + readwaitc chan struct{} + // readNotifier is used to notify the read routine that it can process the request + // when there is no error + readNotifier *notifier + + // stop signals the run goroutine should shut down. + stop chan struct{} + // stopping is closed by run goroutine on shutdown. + stopping chan struct{} + // done is closed when all goroutines from start() complete.
+ done chan struct{} + + errorc chan error + id types.ID + attributes membership.Attributes + + cluster *membership.RaftCluster + + store store.Store + snapshotter *snap.Snapshotter + + applyV2 ApplierV2 + + // applyV3 is the applier with auth and quotas + applyV3 applierV3 + // applyV3Base is the core applier without auth or quotas + applyV3Base applierV3 + applyWait wait.WaitTime + + kv mvcc.ConsistentWatchableKV + lessor lease.Lessor + bemu sync.Mutex + be backend.Backend + authStore auth.AuthStore + alarmStore *alarm.AlarmStore + + stats *stats.ServerStats + lstats *stats.LeaderStats + + SyncTicker *time.Ticker + // compactor is used to auto-compact the KV. + compactor *compactor.Periodic + + // peerRt is used to send requests (version, lease) to peers. + peerRt http.RoundTripper + reqIDGen *idutil.Generator + + // forceVersionC is used to force the version monitor loop + // to detect the cluster version immediately. + forceVersionC chan struct{} + + // wgMu blocks concurrent waitgroup mutation while the server is stopping + wgMu sync.RWMutex + // wg is used to wait for the goroutines that depend on the server state + // to exit when stopping the server. + wg sync.WaitGroup + + // ctx is used for etcd-initiated requests that may need to be canceled + // on etcd server shutdown. + ctx context.Context + cancel context.CancelFunc + + leadTimeMu sync.RWMutex + leadElectedTime time.Time +} + +// NewServer creates a new EtcdServer from the supplied configuration. The +// configuration is considered static for the lifetime of the EtcdServer. +func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { + st := store.New(StoreClusterPrefix, StoreKeysPrefix) + + var ( + w *wal.WAL + n raft.Node + s *raft.MemoryStorage + id types.ID + cl *membership.RaftCluster + ) + + if cfg.MaxRequestBytes > recommendedMaxRequestBytes { + plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) + } + + if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { + return nil, fmt.Errorf("cannot access data directory: %v", terr) + } + + haveWAL := wal.Exist(cfg.WALDir()) + + if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil { + plog.Fatalf("create snapshot directory error: %v", err) + } + ss := snap.New(cfg.SnapDir()) + + bepath := cfg.backendPath() + beExist := fileutil.Exist(bepath) + be := openBackend(cfg) + + defer func() { + if err != nil { + be.Close() + } + }() + + prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout()) + if err != nil { + return nil, err + } + var ( + remotes []*membership.Member + snapshot *raftpb.Snapshot + ) + + switch { + case !haveWAL && !cfg.NewCluster: + if err = cfg.VerifyJoinExisting(); err != nil { + return nil, err + } + cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) + if err != nil { + return nil, err + } + existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt) + if gerr != nil { + return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr) + } + if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil { + return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) + } + if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) { + return nil, fmt.Errorf("incompatible with current running cluster") + } + + remotes = existingCluster.Members() + cl.SetID(existingCluster.ID()) + cl.SetStore(st) + cl.SetBackend(be) + cfg.Print() + id, n, s, w =
startNode(cfg, cl, nil) + case !haveWAL && cfg.NewCluster: + if err = cfg.VerifyBootstrap(); err != nil { + return nil, err + } + cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) + if err != nil { + return nil, err + } + m := cl.MemberByName(cfg.Name) + if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) { + return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) + } + if cfg.ShouldDiscover() { + var str string + str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) + if err != nil { + return nil, &DiscoveryError{Op: "join", Err: err} + } + var urlsmap types.URLsMap + urlsmap, err = types.NewURLsMap(str) + if err != nil { + return nil, err + } + if checkDuplicateURL(urlsmap) { + return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) + } + if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil { + return nil, err + } + } + cl.SetStore(st) + cl.SetBackend(be) + cfg.PrintWithInitial() + id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) + case haveWAL: + if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { + return nil, fmt.Errorf("cannot write to member directory: %v", err) + } + + if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil { + return nil, fmt.Errorf("cannot write to WAL directory: %v", err) + } + + if cfg.ShouldDiscover() { + plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir()) + } + snapshot, err = ss.Load() + if err != nil && err != snap.ErrNoSnapshot { + return nil, err + } + if snapshot != nil { + if err = st.Recovery(snapshot.Data); err != nil { + plog.Panicf("recovered store from snapshot error: %v", err) + } + plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) + if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { + plog.Panicf("recovering backend from snapshot error: %v", err) + } + } + cfg.Print() + if !cfg.ForceNewCluster { + id, cl, n, s, w = restartNode(cfg, snapshot) + } else { + id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot) + } + cl.SetStore(st) + cl.SetBackend(be) + cl.Recover(api.UpdateCapability) + if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist { + os.RemoveAll(bepath) + return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath) + } + default: + return nil, fmt.Errorf("unsupported bootstrap config") + } + + if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil { + return nil, fmt.Errorf("cannot access member directory: %v", terr) + } + + sstats := stats.NewServerStats(cfg.Name, id.String()) + lstats := stats.NewLeaderStats(id.String()) + + heartbeat := time.Duration(cfg.TickMs) * time.Millisecond + srv = &EtcdServer{ + readych: make(chan struct{}), + Cfg: cfg, + snapCount: cfg.SnapCount, + errorc: make(chan error, 1), + store: st, + snapshotter: ss, + r: *newRaftNode( + raftNodeConfig{ + isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, + Node: n, + heartbeat: heartbeat, + raftStorage: s, + storage: NewStorage(w, ss), + }, + ), + id: id, + attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, + cluster: cl, + stats: sstats, + lstats: lstats, + SyncTicker: time.NewTicker(500 * time.Millisecond), + peerRt: prt, + reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), + forceVersionC: make(chan struct{}), + } + + 
srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster} + + srv.be = be + minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat + + // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. + // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. + srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds()))) + srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex) + if beExist { + kvindex := srv.kv.ConsistentIndex() + // TODO: remove kvindex != 0 checking when we do not expect users to upgrade + // etcd from pre-3.0 release. + if snapshot != nil && kvindex < snapshot.Metadata.Index { + if kvindex != 0 { + return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index) + } + plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) + } + } + newSrv := srv // since srv == nil in defer if srv is returned as nil + defer func() { + // closing backend without first closing kv can cause + // resumed compactions to fail with closed tx errors + if err != nil { + newSrv.kv.Close() + } + }() + + srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) + tp, err := auth.NewTokenProvider(cfg.AuthToken, + func(index uint64) <-chan struct{} { + return srv.applyWait.Wait(index) + }, + ) + if err != nil { + plog.Errorf("failed to create token provider: %s", err) + return nil, err + } + srv.authStore = auth.NewAuthStore(srv.be, tp) + if h := cfg.AutoCompactionRetention; h != 0 { + srv.compactor = compactor.NewPeriodic(h, srv.kv, srv) + srv.compactor.Run() + } + + srv.applyV3Base = &applierV3backend{srv} + if err = srv.restoreAlarms(); err != nil { + return nil, err + } + + // TODO: move transport initialization near the definition of remote + tr := &rafthttp.Transport{ + TLSInfo: cfg.PeerTLSInfo, + DialTimeout: cfg.peerDialTimeout(), + ID: id, + URLs: cfg.PeerURLs, + ClusterID: cl.ID(), + Raft: srv, + Snapshotter: ss, + ServerStats: sstats, + LeaderStats: lstats, + ErrorC: srv.errorc, + } + if err = tr.Start(); err != nil { + return nil, err + } + // add all remotes into transport + for _, m := range remotes { + if m.ID != id { + tr.AddRemote(m.ID, m.PeerURLs) + } + } + for _, m := range cl.Members() { + if m.ID != id { + tr.AddPeer(m.ID, m.PeerURLs) + } + } + srv.r.transport = tr + + return srv, nil +} + +// Start prepares and starts server in a new goroutine. It is no longer safe to +// modify a server's fields after it has been sent to Start. +// It also starts a goroutine to publish its server information. +func (s *EtcdServer) Start() { + s.start() + s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) }) + s.goAttach(s.purgeFile) + s.goAttach(func() { monitorFileDescriptor(s.stopping) }) + s.goAttach(s.monitorVersions) + s.goAttach(s.linearizableReadLoop) +} + +// start prepares and starts server in a new goroutine. It is no longer safe to +// modify a server's fields after it has been sent to Start. +// This function is just used for testing. 
+func (s *EtcdServer) start() { + if s.snapCount == 0 { + plog.Infof("set snapshot count to default %d", DefaultSnapCount) + s.snapCount = DefaultSnapCount + } + s.w = wait.New() + s.applyWait = wait.NewTimeList() + s.done = make(chan struct{}) + s.stop = make(chan struct{}) + s.stopping = make(chan struct{}) + s.ctx, s.cancel = context.WithCancel(context.Background()) + s.readwaitc = make(chan struct{}, 1) + s.readNotifier = newNotifier() + if s.ClusterVersion() != nil { + plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String())) + } else { + plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version) + } + // TODO: if this is an empty log, writes all peer infos + // into the first entry + go s.run() +} + +func (s *EtcdServer) purgeFile() { + var serrc, werrc <-chan error + if s.Cfg.MaxSnapFiles > 0 { + serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) + } + if s.Cfg.MaxWALFiles > 0 { + werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done) + } + select { + case e := <-werrc: + plog.Fatalf("failed to purge wal file %v", e) + case e := <-serrc: + plog.Fatalf("failed to purge snap file %v", e) + case <-s.stopping: + return + } +} + +func (s *EtcdServer) ID() types.ID { return s.id } + +func (s *EtcdServer) Cluster() *membership.RaftCluster { return s.cluster } + +func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() } + +func (s *EtcdServer) Lessor() lease.Lessor { return s.lessor } + +func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) } + +func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { + if s.cluster.IsIDRemoved(types.ID(m.From)) { + plog.Warningf("reject message from removed member %s", types.ID(m.From).String()) + return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member") + } + if m.Type == raftpb.MsgApp { + s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size()) + } + return s.r.Step(ctx, m) +} + +func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) } + +func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) } + +// ReportSnapshot reports snapshot sent status to the raft state machine, +// and clears the used snapshot from the snapshot store. +func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) { + s.r.ReportSnapshot(id, status) +} + +type etcdProgress struct { + confState raftpb.ConfState + snapi uint64 + appliedt uint64 + appliedi uint64 +} + +// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode, +// and helps decouple state machine logic from Raft algorithms. 
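+// For example, raftNode calls rh.updateLeadership from its Ready loop whenever +// the soft state changes, and the handler built in run() below reacts by demoting +// the lessor and pausing the compactor when leadership is lost.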
+// TODO: add a state machine interface to apply the commit entries and do snapshot/recover +type raftReadyHandler struct { + updateLeadership func(newLeader bool) + updateCommittedIndex func(uint64) +} + +func (s *EtcdServer) run() { + sn, err := s.r.raftStorage.Snapshot() + if err != nil { + plog.Panicf("get snapshot from raft storage error: %v", err) + } + + // asynchronously accept apply packets, dispatch progress in-order + sched := schedule.NewFIFOScheduler() + + var ( + smu sync.RWMutex + syncC <-chan time.Time + ) + setSyncC := func(ch <-chan time.Time) { + smu.Lock() + syncC = ch + smu.Unlock() + } + getSyncC := func() (ch <-chan time.Time) { + smu.RLock() + ch = syncC + smu.RUnlock() + return + } + rh := &raftReadyHandler{ + updateLeadership: func(newLeader bool) { + if !s.isLeader() { + if s.lessor != nil { + s.lessor.Demote() + } + if s.compactor != nil { + s.compactor.Pause() + } + setSyncC(nil) + } else { + if newLeader { + t := time.Now() + s.leadTimeMu.Lock() + s.leadElectedTime = t + s.leadTimeMu.Unlock() + } + setSyncC(s.SyncTicker.C) + if s.compactor != nil { + s.compactor.Resume() + } + } + + // TODO: remove the nil checking + // current test utility does not provide the stats + if s.stats != nil { + s.stats.BecomeLeader() + } + }, + updateCommittedIndex: func(ci uint64) { + cci := s.getCommittedIndex() + if ci > cci { + s.setCommittedIndex(ci) + } + }, + } + s.r.start(rh) + + ep := etcdProgress{ + confState: sn.Metadata.ConfState, + snapi: sn.Metadata.Index, + appliedt: sn.Metadata.Term, + appliedi: sn.Metadata.Index, + } + + defer func() { + s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping + close(s.stopping) + s.wgMu.Unlock() + s.cancel() + + sched.Stop() + + // wait for gouroutines before closing raft so wal stays open + s.wg.Wait() + + s.SyncTicker.Stop() + + // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines + // by adding a peer after raft stops the transport + s.r.stop() + + // kv, lessor and backend can be nil if running without v3 enabled + // or running unit tests. 
+ if s.lessor != nil { + s.lessor.Stop() + } + if s.kv != nil { + s.kv.Close() + } + if s.authStore != nil { + s.authStore.Close() + } + if s.be != nil { + s.be.Close() + } + if s.compactor != nil { + s.compactor.Stop() + } + close(s.done) + }() + + var expiredLeaseC <-chan []*lease.Lease + if s.lessor != nil { + expiredLeaseC = s.lessor.ExpiredLeasesC() + } + + for { + select { + case ap := <-s.r.apply(): + f := func(context.Context) { s.applyAll(&ep, &ap) } + sched.Schedule(f) + case leases := <-expiredLeaseC: + s.goAttach(func() { + // Increases throughput of expired leases deletion process through parallelization + c := make(chan struct{}, maxPendingRevokes) + for _, lease := range leases { + select { + case c <- struct{}{}: + case <-s.stopping: + return + } + lid := lease.ID + s.goAttach(func() { + s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) + leaseExpired.Inc() + <-c + }) + } + }) + case err := <-s.errorc: + plog.Errorf("%s", err) + plog.Infof("the data-dir used by this member must be removed.") + return + case <-getSyncC(): + if s.store.HasTTLKeys() { + s.sync(s.Cfg.ReqTimeout()) + } + case <-s.stop: + return + } + } +} + +func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { + s.applySnapshot(ep, apply) + st := time.Now() + s.applyEntries(ep, apply) + d := time.Since(st) + entriesNum := len(apply.entries) + if entriesNum != 0 && d > time.Duration(entriesNum)*warnApplyDuration { + plog.Warningf("apply entries took too long [%v for %d entries]", d, len(apply.entries)) + plog.Warningf("avoid queries with large range/delete range!") + } + proposalsApplied.Set(float64(ep.appliedi)) + s.applyWait.Trigger(ep.appliedi) + // wait for the raft routine to finish the disk writes before triggering a + // snapshot. or applied index might be greater than the last index in raft + // storage, since the raft routine might be slower than apply routine. + <-apply.notifyc + + s.triggerSnapshot(ep) + select { + // snapshot requested via send() + case m := <-s.r.msgSnapC: + merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState) + s.sendMergedSnap(merged) + default: + } +} + +func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { + if raft.IsEmptySnap(apply.snapshot) { + return + } + + plog.Infof("applying snapshot at index %d...", ep.snapi) + defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi) + + if apply.snapshot.Metadata.Index <= ep.appliedi { + plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1", + apply.snapshot.Metadata.Index, ep.appliedi) + } + + // wait for raftNode to persist snapshot onto the disk + <-apply.notifyc + + newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot) + if err != nil { + plog.Panic(err) + } + + // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. + // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. + if s.lessor != nil { + plog.Info("recovering lessor...") + s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() }) + plog.Info("finished recovering lessor") + } + + plog.Info("restoring mvcc store...") + + if err := s.kv.Restore(newbe); err != nil { + plog.Panicf("restore KV error: %v", err) + } + s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex()) + + plog.Info("finished restoring mvcc store") + + // Closing old backend might block until all the txns + // on the backend are finished. + // We do not want to wait on closing the old backend. 
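The expired-lease branch in the run loop above bounds its fan-out with a buffered channel used as a counting semaphore: a send acquires a slot before each revocation goroutine starts, and the goroutine releases the slot when it finishes. A small, self-contained sketch of the same pattern; the limit and the printed "work" are placeholders, not the vendored implementation:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxPending = 4 // counterpart of maxPendingRevokes above
	sem := make(chan struct{}, maxPending)
	var wg sync.WaitGroup

	for id := 0; id < 16; id++ {
		sem <- struct{}{} // blocks once maxPending workers are in flight
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot when done
			fmt.Println("revoking lease", id)
		}(id)
	}
	wg.Wait()
}
```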
+ s.bemu.Lock() + oldbe := s.be + go func() { + plog.Info("closing old backend...") + defer plog.Info("finished closing old backend") + + if err := oldbe.Close(); err != nil { + plog.Panicf("close backend error: %v", err) + } + }() + + s.be = newbe + s.bemu.Unlock() + + plog.Info("recovering alarms...") + if err := s.restoreAlarms(); err != nil { + plog.Panicf("restore alarms error: %v", err) + } + plog.Info("finished recovering alarms") + + if s.authStore != nil { + plog.Info("recovering auth store...") + s.authStore.Recover(newbe) + plog.Info("finished recovering auth store") + } + + plog.Info("recovering store v2...") + if err := s.store.Recovery(apply.snapshot.Data); err != nil { + plog.Panicf("recovery store error: %v", err) + } + plog.Info("finished recovering store v2") + + s.cluster.SetBackend(s.be) + plog.Info("recovering cluster configuration...") + s.cluster.Recover(api.UpdateCapability) + plog.Info("finished recovering cluster configuration") + + plog.Info("removing old peers from network...") + // recover raft transport + s.r.transport.RemoveAllPeers() + plog.Info("finished removing old peers from network") + + plog.Info("adding peers from new cluster configuration into network...") + for _, m := range s.cluster.Members() { + if m.ID == s.ID() { + continue + } + s.r.transport.AddPeer(m.ID, m.PeerURLs) + } + plog.Info("finished adding peers from new cluster configuration into network...") + + ep.appliedt = apply.snapshot.Metadata.Term + ep.appliedi = apply.snapshot.Metadata.Index + ep.snapi = ep.appliedi + ep.confState = apply.snapshot.Metadata.ConfState +} + +func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { + if len(apply.entries) == 0 { + return + } + firsti := apply.entries[0].Index + if firsti > ep.appliedi+1 { + plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi) + } + var ents []raftpb.Entry + if ep.appliedi+1-firsti < uint64(len(apply.entries)) { + ents = apply.entries[ep.appliedi+1-firsti:] + } + if len(ents) == 0 { + return + } + var shouldstop bool + if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop { + go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster")) + } +} + +func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { + if ep.appliedi-ep.snapi <= s.snapCount { + return + } + + plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi) + s.snapshot(ep.appliedi, ep.confState) + ep.snapi = ep.appliedi +} + +func (s *EtcdServer) isMultiNode() bool { + return s.cluster != nil && len(s.cluster.MemberIDs()) > 1 +} + +func (s *EtcdServer) isLeader() bool { + return uint64(s.ID()) == s.Lead() +} + +// transferLeadership transfers the leader to the given transferee. +// TODO: maybe expose to client? 
+func (s *EtcdServer) transferLeadership(ctx context.Context, lead, transferee uint64) error { + now := time.Now() + interval := time.Duration(s.Cfg.TickMs) * time.Millisecond + + plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee)) + s.r.TransferLeadership(ctx, lead, transferee) + for s.Lead() != transferee { + select { + case <-ctx.Done(): // time out + return ErrTimeoutLeaderTransfer + case <-time.After(interval): + } + } + + // TODO: drain all requests, or drop all messages to the old leader + + plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now)) + return nil +} + +// TransferLeadership transfers the leader to the chosen transferee. +func (s *EtcdServer) TransferLeadership() error { + if !s.isLeader() { + plog.Printf("skipped leadership transfer for stopping non-leader member") + return nil + } + + if !s.isMultiNode() { + plog.Printf("skipped leadership transfer for single member cluster") + return nil + } + + transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs()) + if !ok { + return ErrUnhealthy + } + + tm := s.Cfg.ReqTimeout() + ctx, cancel := context.WithTimeout(s.ctx, tm) + err := s.transferLeadership(ctx, s.Lead(), uint64(transferee)) + cancel() + return err +} + +// HardStop stops the server without coordination with other members in the cluster. +func (s *EtcdServer) HardStop() { + select { + case s.stop <- struct{}{}: + case <-s.done: + return + } + <-s.done +} + +// Stop stops the server gracefully, and shuts down the running goroutine. +// Stop should be called after a Start(s), otherwise it will block forever. +// When stopping leader, Stop transfers its leadership to one of its peers +// before stopping the server. +func (s *EtcdServer) Stop() { + if err := s.TransferLeadership(); err != nil { + plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err) + } + s.HardStop() +} + +// ReadyNotify returns a channel that will be closed when the server +// is ready to serve client requests +func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych } + +func (s *EtcdServer) stopWithDelay(d time.Duration, err error) { + select { + case <-time.After(d): + case <-s.done: + } + select { + case s.errorc <- err: + default: + } +} + +// StopNotify returns a channel that receives a empty struct +// when the server is stopped. +func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done } + +func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() } + +func (s *EtcdServer) LeaderStats() []byte { + lead := atomic.LoadUint64(&s.r.lead) + if lead != uint64(s.id) { + return nil + } + return s.lstats.JSON() +} + +func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() } + +func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { + if s.authStore == nil { + // In the context of ordinary etcd process, s.authStore will never be nil. + // This branch is for handling cases in server_test.go + return nil + } + + // Note that this permission check is done in the API layer, + // so TOCTOU problem can be caused potentially in a schedule like this: + // update membership with user A -> revoke root role of A -> apply membership change + // in the state machine layer + // However, both of membership change and role management requires the root privilege. + // So careful operation by admins can prevent the problem. 
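transferLeadership above is a simple poll loop: request the transfer once, then re-check the observed leader every tick interval until it matches the transferee or the caller's context expires. A generic sketch of that wait-for-condition shape, with a placeholder predicate standing in for `s.Lead() != transferee`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("leader transfer timed out")

// waitFor polls cond at the given interval until it holds or ctx is done.
func waitFor(ctx context.Context, interval time.Duration, cond func() bool) error {
	for !cond() {
		select {
		case <-ctx.Done():
			return errTimeout
		case <-time.After(interval):
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()
	start := time.Now()
	// The condition flips true after ~100ms, as if the transferee became leader.
	err := waitFor(ctx, 10*time.Millisecond, func() bool {
		return time.Since(start) > 100*time.Millisecond
	})
	fmt.Println(err) // <nil>
}
```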
+ authInfo, err := s.AuthInfoFromCtx(ctx) + if err != nil { + return err + } + + return s.AuthStore().IsAdminPermitted(authInfo) +} + +func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { + if err := s.checkMembershipOperationPermission(ctx); err != nil { + return nil, err + } + + if s.Cfg.StrictReconfigCheck { + // by default StrictReconfigCheck is enabled; reject new members if unhealthy + if !s.cluster.IsReadyToAddNewMember() { + plog.Warningf("not enough started members, rejecting member add %+v", memb) + return nil, ErrNotEnoughStartedMembers + } + if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) { + plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) + return nil, ErrUnhealthy + } + } + + // TODO: move Member to protobuf type + b, err := json.Marshal(memb) + if err != nil { + return nil, err + } + cc := raftpb.ConfChange{ + Type: raftpb.ConfChangeAddNode, + NodeID: uint64(memb.ID), + Context: b, + } + return s.configure(ctx, cc) +} + +func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { + if err := s.checkMembershipOperationPermission(ctx); err != nil { + return nil, err + } + + // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss + if err := s.mayRemoveMember(types.ID(id)); err != nil { + return nil, err + } + + cc := raftpb.ConfChange{ + Type: raftpb.ConfChangeRemoveNode, + NodeID: id, + } + return s.configure(ctx, cc) +} + +func (s *EtcdServer) mayRemoveMember(id types.ID) error { + if !s.Cfg.StrictReconfigCheck { + return nil + } + + if !s.cluster.IsReadyToRemoveMember(uint64(id)) { + plog.Warningf("not enough started members, rejecting remove member %s", id) + return ErrNotEnoughStartedMembers + } + + // downed member is safe to remove since it's not part of the active quorum + if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() { + return nil + } + + // protect quorum if some members are down + m := s.cluster.Members() + active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m) + if (active - 1) < 1+((len(m)-1)/2) { + plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id) + return ErrUnhealthy + } + + return nil +} + +func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { + b, merr := json.Marshal(memb) + if merr != nil { + return nil, merr + } + + if err := s.checkMembershipOperationPermission(ctx); err != nil { + return nil, err + } + cc := raftpb.ConfChange{ + Type: raftpb.ConfChangeUpdateNode, + NodeID: uint64(memb.ID), + Context: b, + } + return s.configure(ctx, cc) +} + +// Implement the RaftTimer interface + +func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) } + +func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) } + +// Lead is only for testing purposes. +// TODO: add Raft server interface to expose raft related info: +// Index, Term, Lead, Committed, Applied, LastIndex, etc. +func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) } + +func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) } + +type confChangeResponse struct { + membs []*membership.Member + err error +} + +// configure sends a configuration change through consensus and +// then waits for it to be applied to the server. It +// will block until the change is performed or there is an error. 
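The strict-reconfig check in mayRemoveMember above is pure integer arithmetic: after removing one connected member, the remaining active count must still reach the quorum of the shrunken cluster, 1+((n-1)/2). A small table-driven sketch of the same inequality:

```go
package main

import "fmt"

// safeToRemove reports whether removing one active member out of n total,
// with `active` currently connected, still leaves a quorum. This is the
// same inequality used by mayRemoveMember above.
func safeToRemove(n, active int) bool {
	return (active - 1) >= 1+((n-1)/2)
}

func main() {
	for _, tc := range []struct{ n, active int }{
		{3, 3}, // ok: 2 left, quorum of 2-member cluster is 2
		{3, 2}, // rejected: 1 left, below quorum
		{5, 4}, // ok: 3 left, quorum of 4-member cluster is 3
		{5, 3}, // rejected: 2 left, below quorum
	} {
		fmt.Printf("n=%d active=%d safe=%v\n", tc.n, tc.active, safeToRemove(tc.n, tc.active))
	}
}
```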
+func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) { + cc.ID = s.reqIDGen.Next() + ch := s.w.Register(cc.ID) + start := time.Now() + if err := s.r.ProposeConfChange(ctx, cc); err != nil { + s.w.Trigger(cc.ID, nil) + return nil, err + } + select { + case x := <-ch: + if x == nil { + plog.Panicf("configure trigger value should never be nil") + } + resp := x.(*confChangeResponse) + return resp.membs, resp.err + case <-ctx.Done(): + s.w.Trigger(cc.ID, nil) // GC wait + return nil, s.parseProposeCtxErr(ctx.Err(), start) + case <-s.stopping: + return nil, ErrStopped + } +} + +// sync proposes a SYNC request and is non-blocking. +// This makes no guarantee that the request will be proposed or performed. +// The request will be canceled after the given timeout. +func (s *EtcdServer) sync(timeout time.Duration) { + req := pb.Request{ + Method: "SYNC", + ID: s.reqIDGen.Next(), + Time: time.Now().UnixNano(), + } + data := pbutil.MustMarshal(&req) + // There is no promise that node has leader when do SYNC request, + // so it uses goroutine to propose. + ctx, cancel := context.WithTimeout(s.ctx, timeout) + s.goAttach(func() { + s.r.Propose(ctx, data) + cancel() + }) +} + +// publish registers server information into the cluster. The information +// is the JSON representation of this server's member struct, updated with the +// static clientURLs of the server. +// The function keeps attempting to register until it succeeds, +// or its server is stopped. +func (s *EtcdServer) publish(timeout time.Duration) { + b, err := json.Marshal(s.attributes) + if err != nil { + plog.Panicf("json marshal error: %v", err) + return + } + req := pb.Request{ + Method: "PUT", + Path: membership.MemberAttributesStorePath(s.id), + Val: string(b), + } + + for { + ctx, cancel := context.WithTimeout(s.ctx, timeout) + _, err := s.Do(ctx, req) + cancel() + switch err { + case nil: + close(s.readych) + plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID()) + return + case ErrStopped: + plog.Infof("aborting publish because server is stopped") + return + default: + plog.Errorf("publish error: %v", err) + } + } +} + +func (s *EtcdServer) sendMergedSnap(merged snap.Message) { + atomic.AddInt64(&s.inflightSnapshots, 1) + + s.r.transport.SendSnapshot(merged) + s.goAttach(func() { + select { + case ok := <-merged.CloseNotify(): + // delay releasing inflight snapshot for another 30 seconds to + // block log compaction. + // If the follower still fails to catch up, it is probably just too slow + // to catch up. We cannot avoid the snapshot cycle anyway. + if ok { + select { + case <-time.After(releaseDelayAfterSnapshot): + case <-s.stopping: + } + } + atomic.AddInt64(&s.inflightSnapshots, -1) + case <-s.stopping: + return + } + }) +} + +// apply takes entries received from Raft (after it has been committed) and +// applies them to the current state of the EtcdServer. +// The given entries should not be empty. 
+func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) { + for i := range es { + e := es[i] + switch e.Type { + case raftpb.EntryNormal: + s.applyEntryNormal(&e) + case raftpb.EntryConfChange: + // set the consistent index of current executing entry + if e.Index > s.consistIndex.ConsistentIndex() { + s.consistIndex.setConsistentIndex(e.Index) + } + var cc raftpb.ConfChange + pbutil.MustUnmarshal(&cc, e.Data) + removedSelf, err := s.applyConfChange(cc, confState) + s.setAppliedIndex(e.Index) + shouldStop = shouldStop || removedSelf + s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) + default: + plog.Panicf("entry type should be either EntryNormal or EntryConfChange") + } + atomic.StoreUint64(&s.r.index, e.Index) + atomic.StoreUint64(&s.r.term, e.Term) + appliedt = e.Term + appliedi = e.Index + } + return appliedt, appliedi, shouldStop +} + +// applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer +func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { + shouldApplyV3 := false + if e.Index > s.consistIndex.ConsistentIndex() { + // set the consistent index of current executing entry + s.consistIndex.setConsistentIndex(e.Index) + shouldApplyV3 = true + } + defer s.setAppliedIndex(e.Index) + + // raft state machine may generate noop entry when leader confirmation. + // skip it in advance to avoid some potential bug in the future + if len(e.Data) == 0 { + select { + case s.forceVersionC <- struct{}{}: + default: + } + // promote lessor when the local member is leader and finished + // applying all entries from the last term. + if s.isLeader() { + s.lessor.Promote(s.Cfg.electionTimeout()) + } + return + } + + var raftReq pb.InternalRaftRequest + if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible + var r pb.Request + pbutil.MustUnmarshal(&r, e.Data) + s.w.Trigger(r.ID, s.applyV2Request(&r)) + return + } + if raftReq.V2 != nil { + req := raftReq.V2 + s.w.Trigger(req.ID, s.applyV2Request(req)) + return + } + + // do not re-apply applied entries. + if !shouldApplyV3 { + return + } + + id := raftReq.ID + if id == 0 { + id = raftReq.Header.ID + } + + var ar *applyResult + needResult := s.w.IsRegistered(id) + if needResult || !noSideEffect(&raftReq) { + if !needResult && raftReq.Txn != nil { + removeNeedlessRangeReqs(raftReq.Txn) + } + ar = s.applyV3.Apply(&raftReq) + } + + if ar == nil { + return + } + + if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { + s.w.Trigger(id, ar) + return + } + + plog.Errorf("applying raft message exceeded backend quota") + s.goAttach(func() { + a := &pb.AlarmRequest{ + MemberID: uint64(s.ID()), + Action: pb.AlarmRequest_ACTIVATE, + Alarm: pb.AlarmType_NOSPACE, + } + s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) + s.w.Trigger(id, ar) + }) +} + +// applyConfChange applies a ConfChange to the server. 
It is only +// invoked with a ConfChange that has already passed through Raft +func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) { + if err := s.cluster.ValidateConfigurationChange(cc); err != nil { + cc.NodeID = raft.None + s.r.ApplyConfChange(cc) + return false, err + } + *confState = *s.r.ApplyConfChange(cc) + switch cc.Type { + case raftpb.ConfChangeAddNode: + m := new(membership.Member) + if err := json.Unmarshal(cc.Context, m); err != nil { + plog.Panicf("unmarshal member should never fail: %v", err) + } + if cc.NodeID != uint64(m.ID) { + plog.Panicf("nodeID should always be equal to member ID") + } + s.cluster.AddMember(m) + if m.ID != s.id { + s.r.transport.AddPeer(m.ID, m.PeerURLs) + } + case raftpb.ConfChangeRemoveNode: + id := types.ID(cc.NodeID) + s.cluster.RemoveMember(id) + if id == s.id { + return true, nil + } + s.r.transport.RemovePeer(id) + case raftpb.ConfChangeUpdateNode: + m := new(membership.Member) + if err := json.Unmarshal(cc.Context, m); err != nil { + plog.Panicf("unmarshal member should never fail: %v", err) + } + if cc.NodeID != uint64(m.ID) { + plog.Panicf("nodeID should always be equal to member ID") + } + s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes) + if m.ID != s.id { + s.r.transport.UpdatePeer(m.ID, m.PeerURLs) + } + } + return false, nil +} + +// TODO: non-blocking snapshot +func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { + clone := s.store.Clone() + // commit kv to write metadata (for example: consistent index) to disk. + // KV().commit() updates the consistent index in backend. + // All operations that update consistent index must be called sequentially + // from applyAll function. + // So KV().Commit() cannot run in parallel with apply. It has to be called outside + // the go routine created below. + s.KV().Commit() + + s.goAttach(func() { + d, err := clone.SaveNoCopy() + // TODO: current store will never fail to do a snapshot + // what should we do if the store might fail? + if err != nil { + plog.Panicf("store save should never fail: %v", err) + } + snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d) + if err != nil { + // the snapshot was done asynchronously with the progress of raft. + // raft might have already got a newer snapshot. + if err == raft.ErrSnapOutOfDate { + return + } + plog.Panicf("unexpected create snapshot error %v", err) + } + // SaveSnap saves the snapshot and releases the locked wal files + // to the snapshot index. + if err = s.r.storage.SaveSnap(snap); err != nil { + plog.Fatalf("save snapshot error: %v", err) + } + plog.Infof("saved snapshot at index %d", snap.Metadata.Index) + + // When sending a snapshot, etcd will pause compaction. + // After receives a snapshot, the slow follower needs to get all the entries right after + // the snapshot sent to catch up. If we do not pause compaction, the log entries right after + // the snapshot sent might already be compacted. It happens when the snapshot takes long time + // to send and save. Pausing compaction avoids triggering a snapshot sending cycle. + if atomic.LoadInt64(&s.inflightSnapshots) != 0 { + plog.Infof("skip compaction since there is an inflight snapshot") + return + } + + // keep some in memory log entries for slow followers. + compacti := uint64(1) + if snapi > numberOfCatchUpEntries { + compacti = snapi - numberOfCatchUpEntries + } + err = s.r.raftStorage.Compact(compacti) + if err != nil { + // the compaction was done asynchronously with the progress of raft. 
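After saving a snapshot, the code above compacts the raft log but keeps a tail of numberOfCatchUpEntries entries so slow followers can catch up from the log instead of forcing another snapshot cycle. A sketch of just the index arithmetic, with an assumed retention count:

```go
package main

import "fmt"

// compactIndex mirrors the retention arithmetic above: keep `retain`
// entries below the snapshot index, but never compact below index 1.
func compactIndex(snapi, retain uint64) uint64 {
	if snapi > retain {
		return snapi - retain
	}
	return 1
}

func main() {
	const retain = 5000 // stand-in for numberOfCatchUpEntries; value assumed
	fmt.Println(compactIndex(120000, retain)) // 115000
	fmt.Println(compactIndex(3000, retain))   // 1: log still too short to compact
}
```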
+ // raft log might already been compact. + if err == raft.ErrCompacted { + return + } + plog.Panicf("unexpected compaction error %v", err) + } + plog.Infof("compacted raft log at %d", compacti) + }) +} + +// CutPeer drops messages to the specified peer. +func (s *EtcdServer) CutPeer(id types.ID) { + tr, ok := s.r.transport.(*rafthttp.Transport) + if ok { + tr.CutPeer(id) + } +} + +// MendPeer recovers the message dropping behavior of the given peer. +func (s *EtcdServer) MendPeer(id types.ID) { + tr, ok := s.r.transport.(*rafthttp.Transport) + if ok { + tr.MendPeer(id) + } +} + +func (s *EtcdServer) PauseSending() { s.r.pauseSending() } + +func (s *EtcdServer) ResumeSending() { s.r.resumeSending() } + +func (s *EtcdServer) ClusterVersion() *semver.Version { + if s.cluster == nil { + return nil + } + return s.cluster.Version() +} + +// monitorVersions checks the member's version every monitorVersionInterval. +// It updates the cluster version if all members agrees on a higher one. +// It prints out log if there is a member with a higher version than the +// local version. +func (s *EtcdServer) monitorVersions() { + for { + select { + case <-s.forceVersionC: + case <-time.After(monitorVersionInterval): + case <-s.stopping: + return + } + + if s.Leader() != s.ID() { + continue + } + + v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt)) + if v != nil { + // only keep major.minor version for comparison + v = &semver.Version{ + Major: v.Major, + Minor: v.Minor, + } + } + + // if the current version is nil: + // 1. use the decided version if possible + // 2. or use the min cluster version + if s.cluster.Version() == nil { + verStr := version.MinClusterVersion + if v != nil { + verStr = v.String() + } + s.goAttach(func() { s.updateClusterVersion(verStr) }) + continue + } + + // update cluster version only if the decided version is greater than + // the current cluster version + if v != nil && s.cluster.Version().LessThan(*v) { + s.goAttach(func() { s.updateClusterVersion(v.String()) }) + } + } +} + +func (s *EtcdServer) updateClusterVersion(ver string) { + if s.cluster.Version() == nil { + plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver)) + } else { + plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver)) + } + req := pb.Request{ + Method: "PUT", + Path: membership.StoreClusterVersionKey(), + Val: ver, + } + ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) + _, err := s.Do(ctx, req) + cancel() + switch err { + case nil: + return + case ErrStopped: + plog.Infof("aborting update cluster version because server is stopped") + return + default: + plog.Errorf("error updating cluster version (%v)", err) + } +} + +func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { + switch err { + case context.Canceled: + return ErrCanceled + case context.DeadlineExceeded: + s.leadTimeMu.RLock() + curLeadElected := s.leadElectedTime + s.leadTimeMu.RUnlock() + prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) + if start.After(prevLeadLost) && start.Before(curLeadElected) { + return ErrTimeoutDueToLeaderFail + } + + lead := types.ID(atomic.LoadUint64(&s.r.lead)) + switch lead { + case types.ID(raft.None): + // TODO: return error to specify it happens because the cluster does not have leader now + case s.ID(): + if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) { + 
return ErrTimeoutDueToConnectionLost + } + default: + if !isConnectedSince(s.r.transport, start, lead) { + return ErrTimeoutDueToConnectionLost + } + } + + return ErrTimeout + default: + return err + } +} + +func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv } +func (s *EtcdServer) Backend() backend.Backend { + s.bemu.Lock() + defer s.bemu.Unlock() + return s.be +} + +func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore } + +func (s *EtcdServer) restoreAlarms() error { + s.applyV3 = s.newApplierV3() + as, err := alarm.NewAlarmStore(s) + if err != nil { + return err + } + s.alarmStore = as + if len(as.Get(pb.AlarmType_NOSPACE)) > 0 { + s.applyV3 = newApplierV3Capped(s.applyV3) + } + return nil +} + +func (s *EtcdServer) getAppliedIndex() uint64 { + return atomic.LoadUint64(&s.appliedIndex) +} + +func (s *EtcdServer) setAppliedIndex(v uint64) { + atomic.StoreUint64(&s.appliedIndex, v) +} + +func (s *EtcdServer) getCommittedIndex() uint64 { + return atomic.LoadUint64(&s.committedIndex) +} + +func (s *EtcdServer) setCommittedIndex(v uint64) { + atomic.StoreUint64(&s.committedIndex, v) +} + +// goAttach creates a goroutine on a given function and tracks it using +// the etcdserver waitgroup. +func (s *EtcdServer) goAttach(f func()) { + s.wgMu.RLock() // this blocks with ongoing close(s.stopping) + defer s.wgMu.RUnlock() + select { + case <-s.stopping: + plog.Warning("server has stopped (skipping goAttach)") + return + default: + } + + // now safe to add since waitgroup wait has not started yet + s.wg.Add(1) + go func() { + defer s.wg.Done() + f() + }() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go new file mode 100644 index 000000000000..928aa95b6b16 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go @@ -0,0 +1,73 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "io" + + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" +) + +// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf), +// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message +// as ReadCloser. +func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message { + // get a snapshot of v2 store as []byte + clone := s.store.Clone() + d, err := clone.SaveNoCopy() + if err != nil { + plog.Panicf("store save should never fail: %v", err) + } + + // commit kv to write metadata(for example: consistent index). + s.KV().Commit() + dbsnap := s.be.Snapshot() + // get a snapshot of v3 KV as readCloser + rc := newSnapshotReaderCloser(dbsnap) + + // put the []byte snapshot of store into raft snapshot and return the merged snapshot with + // KV readCloser snapshot. 
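goAttach above pairs a WaitGroup with a stopping channel and an RWMutex so that shutdown, which takes the write lock, closes the channel, and then waits, can never race with a new goroutine being added. A condensed, self-contained sketch of that lifecycle:

```go
package main

import (
	"fmt"
	"sync"
)

type server struct {
	wg       sync.WaitGroup
	wgMu     sync.RWMutex
	stopping chan struct{}
}

func (s *server) goAttach(f func()) {
	s.wgMu.RLock() // excludes the close(s.stopping) in stop()
	defer s.wgMu.RUnlock()
	select {
	case <-s.stopping:
		return // refuse new goroutines once stopping
	default:
	}
	s.wg.Add(1) // safe: Wait cannot have started while we hold the read lock
	go func() { defer s.wg.Done(); f() }()
}

func (s *server) stop() {
	s.wgMu.Lock() // blocks concurrent goAttach calls
	close(s.stopping)
	s.wgMu.Unlock()
	s.wg.Wait() // every attached goroutine has finished past this point
}

func main() {
	s := &server{stopping: make(chan struct{})}
	s.goAttach(func() { fmt.Println("tracked work") })
	s.stop()
	s.goAttach(func() { fmt.Println("never runs") })
}
```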
+ snapshot := raftpb.Snapshot{ + Metadata: raftpb.SnapshotMetadata{ + Index: snapi, + Term: snapt, + ConfState: confState, + }, + Data: d, + } + m.Snapshot = snapshot + + return *snap.NewMessage(m, rc, dbsnap.Size()) +} + +func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { + pr, pw := io.Pipe() + go func() { + n, err := snapshot.WriteTo(pw) + if err == nil { + plog.Infof("wrote database snapshot out [total bytes: %d]", n) + } else { + plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) + } + pw.CloseWithError(err) + err = snapshot.Close() + if err != nil { + plog.Panicf("failed to close database snapshot: %v", err) + } + }() + return pr +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/BUILD b/vendor/github.com/coreos/etcd/etcdserver/stats/BUILD new file mode 100644 index 000000000000..5bf333040d47 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "leader.go", + "queue.go", + "server.go", + "stats.go", + ], + importpath = "github.com/coreos/etcd/etcdserver/stats", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/raft:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go new file mode 100644 index 000000000000..8f6a54ff751a --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go @@ -0,0 +1,128 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stats + +import ( + "encoding/json" + "math" + "sync" + "time" +) + +// LeaderStats is used by the leader in an etcd cluster, and encapsulates +// statistics about communication with its followers +type LeaderStats struct { + leaderStats + sync.Mutex +} + +type leaderStats struct { + // Leader is the ID of the leader in the etcd cluster. + // TODO(jonboulle): clarify that these are IDs, not names + Leader string `json:"leader"` + Followers map[string]*FollowerStats `json:"followers"` +} + +// NewLeaderStats generates a new LeaderStats with the given id as leader +func NewLeaderStats(id string) *LeaderStats { + return &LeaderStats{ + leaderStats: leaderStats{ + Leader: id, + Followers: make(map[string]*FollowerStats), + }, + } +} + +func (ls *LeaderStats) JSON() []byte { + ls.Lock() + stats := ls.leaderStats + ls.Unlock() + b, err := json.Marshal(stats) + // TODO(jonboulle): appropriate error handling? 
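newSnapshotReaderCloser above turns a push-style WriteTo into a pull-style ReadCloser using io.Pipe: the writer runs in a goroutine, and any write error is propagated to the reader through CloseWithError (a nil error yields a clean EOF). A minimal sketch with strings.Reader standing in for the backend snapshot:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// asReader streams src through a pipe, the same shape as
// newSnapshotReaderCloser above (error handling and logging omitted).
func asReader(src io.WriterTo) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		_, err := src.WriteTo(pw)
		pw.CloseWithError(err) // nil err => reader sees io.EOF
	}()
	return pr
}

func main() {
	rc := asReader(strings.NewReader("snapshot bytes"))
	defer rc.Close()
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b))
}
```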
+ if err != nil { + plog.Errorf("error marshalling leader stats (%v)", err) + } + return b +} + +func (ls *LeaderStats) Follower(name string) *FollowerStats { + ls.Lock() + defer ls.Unlock() + fs, ok := ls.Followers[name] + if !ok { + fs = &FollowerStats{} + fs.Latency.Minimum = 1 << 63 + ls.Followers[name] = fs + } + return fs +} + +// FollowerStats encapsulates various statistics about a follower in an etcd cluster +type FollowerStats struct { + Latency LatencyStats `json:"latency"` + Counts CountsStats `json:"counts"` + + sync.Mutex +} + +// LatencyStats encapsulates latency statistics. +type LatencyStats struct { + Current float64 `json:"current"` + Average float64 `json:"average"` + averageSquare float64 + StandardDeviation float64 `json:"standardDeviation"` + Minimum float64 `json:"minimum"` + Maximum float64 `json:"maximum"` +} + +// CountsStats encapsulates raft statistics. +type CountsStats struct { + Fail uint64 `json:"fail"` + Success uint64 `json:"success"` +} + +// Succ updates the FollowerStats with a successful send +func (fs *FollowerStats) Succ(d time.Duration) { + fs.Lock() + defer fs.Unlock() + + total := float64(fs.Counts.Success) * fs.Latency.Average + totalSquare := float64(fs.Counts.Success) * fs.Latency.averageSquare + + fs.Counts.Success++ + + fs.Latency.Current = float64(d) / (1000000.0) + + if fs.Latency.Current > fs.Latency.Maximum { + fs.Latency.Maximum = fs.Latency.Current + } + + if fs.Latency.Current < fs.Latency.Minimum { + fs.Latency.Minimum = fs.Latency.Current + } + + fs.Latency.Average = (total + fs.Latency.Current) / float64(fs.Counts.Success) + fs.Latency.averageSquare = (totalSquare + fs.Latency.Current*fs.Latency.Current) / float64(fs.Counts.Success) + + // sdv = sqrt(avg(x^2) - avg(x)^2) + fs.Latency.StandardDeviation = math.Sqrt(fs.Latency.averageSquare - fs.Latency.Average*fs.Latency.Average) +} + +// Fail updates the FollowerStats with an unsuccessful send +func (fs *FollowerStats) Fail() { + fs.Lock() + defer fs.Unlock() + fs.Counts.Fail++ +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go b/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go new file mode 100644 index 000000000000..635074c48983 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go @@ -0,0 +1,110 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stats + +import ( + "sync" + "time" +) + +const ( + queueCapacity = 200 +) + +// RequestStats represent the stats for a request. +// It encapsulates the sending time and the size of the request. 
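FollowerStats.Succ above maintains a running mean and a running mean of squares so the standard deviation can be recomputed in O(1) per sample via sqrt(avg(x²) − avg(x)²). The same update rule in isolation:

```go
package main

import (
	"fmt"
	"math"
)

type running struct {
	n         uint64
	avg, avg2 float64 // running mean of x and of x*x
}

func (r *running) add(x float64) {
	total := float64(r.n) * r.avg   // undo the previous division
	total2 := float64(r.n) * r.avg2 // likewise for the squares
	r.n++
	r.avg = (total + x) / float64(r.n)
	r.avg2 = (total2 + x*x) / float64(r.n)
}

func (r *running) stddev() float64 {
	// sdv = sqrt(avg(x^2) - avg(x)^2), as in the hunk above
	return math.Sqrt(r.avg2 - r.avg*r.avg)
}

func main() {
	var r running
	for _, x := range []float64{2, 4, 4, 4, 5, 5, 7, 9} {
		r.add(x)
	}
	fmt.Println(r.avg, r.stddev()) // 5 2
}
```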
+type RequestStats struct { + SendingTime time.Time + Size int +} + +type statsQueue struct { + items [queueCapacity]*RequestStats + size int + front int + back int + totalReqSize int + rwl sync.RWMutex +} + +func (q *statsQueue) Len() int { + return q.size +} + +func (q *statsQueue) ReqSize() int { + return q.totalReqSize +} + +// FrontAndBack gets the front and back elements in the queue +// We must grab front and back together with the protection of the lock +func (q *statsQueue) frontAndBack() (*RequestStats, *RequestStats) { + q.rwl.RLock() + defer q.rwl.RUnlock() + if q.size != 0 { + return q.items[q.front], q.items[q.back] + } + return nil, nil +} + +// Insert function insert a RequestStats into the queue and update the records +func (q *statsQueue) Insert(p *RequestStats) { + q.rwl.Lock() + defer q.rwl.Unlock() + + q.back = (q.back + 1) % queueCapacity + + if q.size == queueCapacity { //dequeue + q.totalReqSize -= q.items[q.front].Size + q.front = (q.back + 1) % queueCapacity + } else { + q.size++ + } + + q.items[q.back] = p + q.totalReqSize += q.items[q.back].Size + +} + +// Rate function returns the package rate and byte rate +func (q *statsQueue) Rate() (float64, float64) { + front, back := q.frontAndBack() + + if front == nil || back == nil { + return 0, 0 + } + + if time.Since(back.SendingTime) > time.Second { + q.Clear() + return 0, 0 + } + + sampleDuration := back.SendingTime.Sub(front.SendingTime) + + pr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second) + + br := float64(q.ReqSize()) / float64(sampleDuration) * float64(time.Second) + + return pr, br +} + +// Clear function clear up the statsQueue +func (q *statsQueue) Clear() { + q.rwl.Lock() + defer q.rwl.Unlock() + q.back = -1 + q.front = 0 + q.size = 0 + q.totalReqSize = 0 +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go new file mode 100644 index 000000000000..0278e885cf99 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go @@ -0,0 +1,142 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stats + +import ( + "encoding/json" + "log" + "sync" + "time" + + "github.com/coreos/etcd/raft" +) + +// ServerStats encapsulates various statistics about an EtcdServer and its +// communication with other members of the cluster +type ServerStats struct { + serverStats + sync.Mutex +} + +func NewServerStats(name, id string) *ServerStats { + ss := &ServerStats{ + serverStats: serverStats{ + Name: name, + ID: id, + }, + } + now := time.Now() + ss.StartTime = now + ss.LeaderInfo.StartTime = now + ss.sendRateQueue = &statsQueue{back: -1} + ss.recvRateQueue = &statsQueue{back: -1} + return ss +} + +type serverStats struct { + Name string `json:"name"` + // ID is the raft ID of the node. + // TODO(jonboulle): use ID instead of name? 
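statsQueue.Rate above derives its rates from the timestamps of the oldest and newest samples: requests per second is the sample count divided by that span, and bytes per second is the summed sizes divided by the same span. A tiny sketch of that computation with the fixed-size ring details omitted:

```go
package main

import (
	"fmt"
	"time"
)

type sample struct {
	at   time.Time
	size int
}

// rates mirrors statsQueue.Rate above: package rate and byte rate over the
// window spanned by the oldest and newest samples.
func rates(samples []sample) (pkgRate, byteRate float64) {
	if len(samples) < 2 {
		return 0, 0
	}
	span := samples[len(samples)-1].at.Sub(samples[0].at)
	total := 0
	for _, s := range samples {
		total += s.size
	}
	pkgRate = float64(len(samples)) / float64(span) * float64(time.Second)
	byteRate = float64(total) / float64(span) * float64(time.Second)
	return pkgRate, byteRate
}

func main() {
	now := time.Now()
	s := []sample{
		{now, 100},
		{now.Add(250 * time.Millisecond), 300},
		{now.Add(500 * time.Millisecond), 200},
	}
	p, b := rates(s)
	fmt.Printf("%.0f req/s, %.0f B/s\n", p, b) // 6 req/s, 1200 B/s
}
```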
+ ID string `json:"id"` + State raft.StateType `json:"state"` + StartTime time.Time `json:"startTime"` + + LeaderInfo struct { + Name string `json:"leader"` + Uptime string `json:"uptime"` + StartTime time.Time `json:"startTime"` + } `json:"leaderInfo"` + + RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt,"` + RecvingPkgRate float64 `json:"recvPkgRate,omitempty"` + RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"` + + SendAppendRequestCnt uint64 `json:"sendAppendRequestCnt"` + SendingPkgRate float64 `json:"sendPkgRate,omitempty"` + SendingBandwidthRate float64 `json:"sendBandwidthRate,omitempty"` + + sendRateQueue *statsQueue + recvRateQueue *statsQueue +} + +func (ss *ServerStats) JSON() []byte { + ss.Lock() + stats := ss.serverStats + ss.Unlock() + stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String() + stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate() + stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate() + b, err := json.Marshal(stats) + // TODO(jonboulle): appropriate error handling? + if err != nil { + log.Printf("stats: error marshalling server stats: %v", err) + } + return b +} + +// RecvAppendReq updates the ServerStats in response to an AppendRequest +// from the given leader being received +func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) { + ss.Lock() + defer ss.Unlock() + + now := time.Now() + + ss.State = raft.StateFollower + if leader != ss.LeaderInfo.Name { + ss.LeaderInfo.Name = leader + ss.LeaderInfo.StartTime = now + } + + ss.recvRateQueue.Insert( + &RequestStats{ + SendingTime: now, + Size: reqSize, + }, + ) + ss.RecvAppendRequestCnt++ +} + +// SendAppendReq updates the ServerStats in response to an AppendRequest +// being sent by this server +func (ss *ServerStats) SendAppendReq(reqSize int) { + ss.Lock() + defer ss.Unlock() + + ss.becomeLeader() + + ss.sendRateQueue.Insert( + &RequestStats{ + SendingTime: time.Now(), + Size: reqSize, + }, + ) + + ss.SendAppendRequestCnt++ +} + +func (ss *ServerStats) BecomeLeader() { + ss.Lock() + defer ss.Unlock() + ss.becomeLeader() +} + +func (ss *ServerStats) becomeLeader() { + if ss.State != raft.StateLeader { + ss.State = raft.StateLeader + ss.LeaderInfo.Name = ss.ID + ss.LeaderInfo.StartTime = time.Now() + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go b/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go new file mode 100644 index 000000000000..2b5f7071aa76 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go @@ -0,0 +1,32 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stats defines a standard interface for etcd cluster statistics. 
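ServerStats.JSON above shows a common lock discipline: hold the mutex only long enough to copy the plain-data struct by value, then do the comparatively slow JSON marshalling on the copy, outside the critical section. A stripped-down sketch of that pattern, with made-up field names:

```go
package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

type stats struct {
	Name  string `json:"name"`
	Count int    `json:"count"`
}

type Stats struct {
	stats
	sync.Mutex
}

func (s *Stats) JSON() []byte {
	s.Lock()
	snapshot := s.stats // value copy while holding the lock
	s.Unlock()
	b, err := json.Marshal(snapshot) // marshal outside the critical section
	if err != nil {
		return nil
	}
	return b
}

func main() {
	s := &Stats{stats: stats{Name: "node0", Count: 7}}
	fmt.Println(string(s.JSON())) // {"name":"node0","count":7}
}
```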
+package stats + +import "github.com/coreos/pkg/capnslog" + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/stats") +) + +type Stats interface { + // SelfStats returns the struct representing statistics of this server + SelfStats() []byte + // LeaderStats returns the statistics of all followers in the cluster + // if this server is leader. Otherwise, nil is returned. + LeaderStats() []byte + // StoreStats returns statistics of the store backing this EtcdServer + StoreStats() []byte +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go new file mode 100644 index 000000000000..aa8f87569dbe --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/storage.go @@ -0,0 +1,98 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "io" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" + "github.com/coreos/etcd/wal" + "github.com/coreos/etcd/wal/walpb" +) + +type Storage interface { + // Save function saves ents and state to the underlying stable storage. + // Save MUST block until st and ents are on stable storage. + Save(st raftpb.HardState, ents []raftpb.Entry) error + // SaveSnap function saves snapshot to the underlying stable storage. + SaveSnap(snap raftpb.Snapshot) error + // Close closes the Storage and performs finalization. + Close() error +} + +type storage struct { + *wal.WAL + *snap.Snapshotter +} + +func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage { + return &storage{w, s} +} + +// SaveSnap saves the snapshot to disk and release the locked +// wal files since they will not be used. +func (st *storage) SaveSnap(snap raftpb.Snapshot) error { + walsnap := walpb.Snapshot{ + Index: snap.Metadata.Index, + Term: snap.Metadata.Term, + } + err := st.WAL.SaveSnapshot(walsnap) + if err != nil { + return err + } + err = st.Snapshotter.SaveSnap(snap) + if err != nil { + return err + } + return st.WAL.ReleaseLockTo(snap.Metadata.Index) +} + +func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { + var ( + err error + wmetadata []byte + ) + + repaired := false + for { + if w, err = wal.Open(waldir, snap); err != nil { + plog.Fatalf("open wal error: %v", err) + } + if wmetadata, st, ents, err = w.ReadAll(); err != nil { + w.Close() + // we can only repair ErrUnexpectedEOF and we never repair twice. 
+ if repaired || err != io.ErrUnexpectedEOF { + plog.Fatalf("read wal error (%v) and cannot be repaired", err) + } + if !wal.Repair(waldir) { + plog.Fatalf("WAL error (%v) cannot be repaired", err) + } else { + plog.Infof("repaired WAL error (%v)", err) + repaired = true + } + continue + } + break + } + var metadata pb.Metadata + pbutil.MustUnmarshal(&metadata, wmetadata) + id = types.ID(metadata.NodeID) + cid = types.ID(metadata.ClusterID) + return +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go new file mode 100644 index 000000000000..e3896ffc2d3d --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/util.go @@ -0,0 +1,97 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "time" + + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/rafthttp" +) + +// isConnectedToQuorumSince checks whether the local member is connected to the +// quorum of the cluster since the given time. +func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { + return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1 +} + +// isConnectedSince checks whether the local member is connected to the +// remote member since the given time. +func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool { + t := transport.ActiveSince(remote) + return !t.IsZero() && t.Before(since) +} + +// isConnectedFullySince checks whether the local member is connected to all +// members in the cluster since the given time. +func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { + return numConnectedSince(transport, since, self, members) == len(members) +} + +// numConnectedSince counts how many members are connected to the local member +// since the given time. +func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int { + connectedNum := 0 + for _, m := range members { + if m.ID == self || isConnectedSince(transport, since, m.ID) { + connectedNum++ + } + } + return connectedNum +} + +// longestConnected chooses the member with longest active-since-time. +// It returns false, if nothing is active. 
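readWAL above retries exactly once: if reading fails with io.ErrUnexpectedEOF it attempts a repair and loops, and a second failure, or any other error class, is fatal. The control flow in isolation, with placeholder open/repair callbacks standing in for the WAL calls:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// readLoop retries readAll once after a successful repair. Both callbacks
// are placeholders for illustration, not the wal package API.
func readLoop(readAll func() error, repair func() bool) error {
	repaired := false
	for {
		err := readAll()
		if err == nil {
			return nil
		}
		// only ErrUnexpectedEOF is repairable, and only once
		if repaired || !errors.Is(err, io.ErrUnexpectedEOF) {
			return fmt.Errorf("read wal: %w", err)
		}
		if !repair() {
			return fmt.Errorf("wal repair failed: %w", err)
		}
		repaired = true
	}
}

func main() {
	calls := 0
	err := readLoop(func() error {
		calls++
		if calls == 1 {
			return io.ErrUnexpectedEOF // torn tail on the first read
		}
		return nil
	}, func() bool { return true })
	fmt.Println(err) // <nil>
}
```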
+func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) { + var longest types.ID + var oldest time.Time + for _, id := range membs { + tm := tp.ActiveSince(id) + if tm.IsZero() { // inactive + continue + } + + if oldest.IsZero() { // first longest candidate + oldest = tm + longest = id + } + + if tm.Before(oldest) { + oldest = tm + longest = id + } + } + if uint64(longest) == 0 { + return longest, false + } + return longest, true +} + +type notifier struct { + c chan struct{} + err error +} + +func newNotifier() *notifier { + return ¬ifier{ + c: make(chan struct{}), + } +} + +func (nc *notifier) notify(err error) { + nc.err = err + close(nc.c) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go new file mode 100644 index 000000000000..72c4eb7c5cc3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go @@ -0,0 +1,125 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "time" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" +) + +type v2API interface { + Post(ctx context.Context, r *pb.Request) (Response, error) + Put(ctx context.Context, r *pb.Request) (Response, error) + Delete(ctx context.Context, r *pb.Request) (Response, error) + QGet(ctx context.Context, r *pb.Request) (Response, error) + Get(ctx context.Context, r *pb.Request) (Response, error) + Head(ctx context.Context, r *pb.Request) (Response, error) +} + +type v2apiStore struct{ s *EtcdServer } + +func (a *v2apiStore) Post(ctx context.Context, r *pb.Request) (Response, error) { + return a.processRaftRequest(ctx, r) +} + +func (a *v2apiStore) Put(ctx context.Context, r *pb.Request) (Response, error) { + return a.processRaftRequest(ctx, r) +} + +func (a *v2apiStore) Delete(ctx context.Context, r *pb.Request) (Response, error) { + return a.processRaftRequest(ctx, r) +} + +func (a *v2apiStore) QGet(ctx context.Context, r *pb.Request) (Response, error) { + return a.processRaftRequest(ctx, r) +} + +func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Response, error) { + data, err := r.Marshal() + if err != nil { + return Response{}, err + } + ch := a.s.w.Register(r.ID) + + start := time.Now() + a.s.r.Propose(ctx, data) + proposalsPending.Inc() + defer proposalsPending.Dec() + + select { + case x := <-ch: + resp := x.(Response) + return resp, resp.err + case <-ctx.Done(): + proposalsFailed.Inc() + a.s.w.Trigger(r.ID, nil) // GC wait + return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start) + case <-a.s.stopping: + } + return Response{}, ErrStopped +} + +func (a *v2apiStore) Get(ctx context.Context, r *pb.Request) (Response, error) { + if r.Wait { + wc, err := a.s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since) + if err != nil { + return Response{}, err + } + return Response{Watcher: wc}, nil + } + ev, err := a.s.store.Get(r.Path, r.Recursive, 
r.Sorted) + if err != nil { + return Response{}, err + } + return Response{Event: ev}, nil +} + +func (a *v2apiStore) Head(ctx context.Context, r *pb.Request) (Response, error) { + ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted) + if err != nil { + return Response{}, err + } + return Response{Event: ev}, nil +} + +// Do interprets r and performs an operation on s.store according to r.Method +// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with +// Quorum == true, r will be sent through consensus before performing its +// respective operation. Do will block until an action is performed or there is +// an error. +func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { + r.ID = s.reqIDGen.Next() + if r.Method == "GET" && r.Quorum { + r.Method = "QGET" + } + v2api := (v2API)(&v2apiStore{s}) + switch r.Method { + case "POST": + return v2api.Post(ctx, &r) + case "PUT": + return v2api.Put(ctx, &r) + case "DELETE": + return v2api.Delete(ctx, &r) + case "QGET": + return v2api.QGet(ctx, &r) + case "GET": + return v2api.Get(ctx, &r) + case "HEAD": + return v2api.Head(ctx, &r) + } + return Response{}, ErrUnknownMethod +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go new file mode 100644 index 000000000000..ae449bbf22ff --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go @@ -0,0 +1,686 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "bytes" + "encoding/binary" + "time" + + "github.com/gogo/protobuf/proto" + + "github.com/coreos/etcd/auth" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/lease/leasehttp" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/raft" + + "golang.org/x/net/context" +) + +const ( + // In the health case, there might be a small gap (10s of entries) between + // the applied index and committed index. + // However, if the committed entries are very heavy to apply, the gap might grow. + // We should stop accepting new proposals if the gap growing to a certain point. + maxGapBetweenApplyAndCommitIndex = 5000 +) + +type RaftKV interface { + Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) + Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) + DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) + Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) + Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) +} + +type Lessor interface { + // LeaseGrant sends LeaseGrant request to raft and apply it after committed. 
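The maxGapBetweenApplyAndCommitIndex constant above is admission control: once the apply loop falls more than a fixed number of entries behind the commit index, new proposals should be refused rather than queued. A sketch of that check; the error name is a placeholder, not the package's exported error:

```go
package main

import (
	"errors"
	"fmt"
)

const maxGap = 5000 // counterpart of maxGapBetweenApplyAndCommitIndex

var errTooManyRequests = errors.New("too many requests")

// admit refuses new proposals while the backlog of committed-but-unapplied
// entries exceeds maxGap.
func admit(committed, applied uint64) error {
	if committed > applied && committed-applied > maxGap {
		return errTooManyRequests
	}
	return nil
}

func main() {
	fmt.Println(admit(10_000, 9_000)) // <nil>: a gap of 1000 is acceptable
	fmt.Println(admit(20_000, 1_000)) // too many requests
}
```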
+ LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) + // LeaseRevoke sends LeaseRevoke request to raft and apply it after committed. + LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) + + // LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error + // is returned. + LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) + + // LeaseTimeToLive retrieves lease information. + LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) +} + +type Authenticator interface { + AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) + AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) + Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) + UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) + UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) + UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) + UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) + UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) + UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) + RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) + RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) + RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) + RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) + RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) + UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) + RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) +} + +func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { + if !r.Serializable { + err := s.linearizableReadNotify(ctx) + if err != nil { + return nil, err + } + } + var resp *pb.RangeResponse + var err error + chk := func(ai *auth.AuthInfo) error { + return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) + } + get := func() { resp, err = s.applyV3Base.Range(nil, r) } + if serr := s.doSerialize(ctx, chk, get); serr != nil { + return nil, serr + } + return resp, err +} + +func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) + if err != nil { + return nil, err + } + return resp.(*pb.PutResponse), nil +} + +func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) + if err != nil { + return nil, err + } + return resp.(*pb.DeleteRangeResponse), nil +} + +func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { + if isTxnReadonly(r) { + if !isTxnSerializable(r) { + err := s.linearizableReadNotify(ctx) + if err != nil { + return nil, err + } + } + var resp 
*pb.TxnResponse + var err error + chk := func(ai *auth.AuthInfo) error { + return checkTxnAuth(s.authStore, ai, r) + } + get := func() { resp, err = s.applyV3Base.Txn(r) } + if serr := s.doSerialize(ctx, chk, get); serr != nil { + return nil, serr + } + return resp, err + } + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) + if err != nil { + return nil, err + } + return resp.(*pb.TxnResponse), nil +} + +func isTxnSerializable(r *pb.TxnRequest) bool { + for _, u := range r.Success { + if r := u.GetRequestRange(); r == nil || !r.Serializable { + return false + } + } + for _, u := range r.Failure { + if r := u.GetRequestRange(); r == nil || !r.Serializable { + return false + } + } + return true +} + +func isTxnReadonly(r *pb.TxnRequest) bool { + for _, u := range r.Success { + if r := u.GetRequestRange(); r == nil { + return false + } + } + for _, u := range r.Failure { + if r := u.GetRequestRange(); r == nil { + return false + } + } + return true +} + +func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { + result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r}) + if r.Physical && result != nil && result.physc != nil { + <-result.physc + // The compaction is done deleting keys; the hash is now settled + // but the data is not necessarily committed. If there's a crash, + // the hash may revert to a hash prior to compaction completing + // if the compaction resumes. Force the finished compaction to + // commit so it won't resume following a crash. + s.be.ForceCommit() + } + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + resp := result.resp.(*pb.CompactionResponse) + if resp == nil { + resp = &pb.CompactionResponse{} + } + if resp.Header == nil { + resp.Header = &pb.ResponseHeader{} + } + resp.Header.Revision = s.kv.Rev() + return resp, nil +} + +func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + // no id given? 
choose one + for r.ID == int64(lease.NoLease) { + // only use positive int64 id's + r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1)) + } + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) + if err != nil { + return nil, err + } + return resp.(*pb.LeaseGrantResponse), nil +} + +func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) + if err != nil { + return nil, err + } + return resp.(*pb.LeaseRevokeResponse), nil +} + +func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) { + ttl, err := s.lessor.Renew(id) + if err == nil { // already requested to primary lessor(leader) + return ttl, nil + } + if err != lease.ErrNotPrimary { + return -1, err + } + + cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) + defer cancel() + + // renewals don't go through raft; forward to leader manually + for cctx.Err() == nil && err != nil { + leader, lerr := s.waitLeader(cctx) + if lerr != nil { + return -1, lerr + } + for _, url := range leader.PeerURLs { + lurl := url + leasehttp.LeasePrefix + ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt) + if err == nil || err == lease.ErrLeaseNotFound { + return ttl, err + } + } + } + return -1, ErrTimeout +} + +func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { + if s.Leader() == s.ID() { + // primary; timetolive directly from leader + le := s.lessor.Lookup(lease.LeaseID(r.ID)) + if le == nil { + return nil, lease.ErrLeaseNotFound + } + // TODO: fill out ResponseHeader + resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()} + if r.Keys { + ks := le.Keys() + kbs := make([][]byte, len(ks)) + for i := range ks { + kbs[i] = []byte(ks[i]) + } + resp.Keys = kbs + } + return resp, nil + } + + cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) + defer cancel() + + // forward to leader + for cctx.Err() == nil { + leader, err := s.waitLeader(cctx) + if err != nil { + return nil, err + } + for _, url := range leader.PeerURLs { + lurl := url + leasehttp.LeaseInternalPrefix + resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt) + if err == nil { + return resp.LeaseTimeToLiveResponse, nil + } + if err == lease.ErrLeaseNotFound { + return nil, err + } + } + } + return nil, ErrTimeout +} + +func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) { + leader := s.cluster.Member(s.Leader()) + for leader == nil { + // wait an election + dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond + select { + case <-time.After(dur): + leader = s.cluster.Member(s.Leader()) + case <-s.stopping: + return nil, ErrStopped + case <-ctx.Done(): + return nil, ErrNoLeader + } + } + if leader == nil || len(leader.PeerURLs) == 0 { + return nil, ErrNoLeader + } + return leader, nil +} + +func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AlarmResponse), nil +} + +func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) + if err != 
nil { + return nil, err + } + return resp.(*pb.AuthEnableResponse), nil +} + +func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthDisableResponse), nil +} + +func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { + if err := s.linearizableReadNotify(ctx); err != nil { + return nil, err + } + + var resp proto.Message + for { + checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) + if err != nil { + if err != auth.ErrAuthNotEnabled { + plog.Errorf("invalid authentication request to user %s was issued", r.Name) + } + return nil, err + } + + st, err := s.AuthStore().GenTokenPrefix() + if err != nil { + return nil, err + } + + internalReq := &pb.InternalAuthenticateRequest{ + Name: r.Name, + Password: r.Password, + SimpleToken: st, + } + + resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) + if err != nil { + return nil, err + } + if checkedRevision == s.AuthStore().Revision() { + break + } + plog.Infof("revision when password checked is obsolete, retrying") + } + + return resp.(*pb.AuthenticateResponse), nil +} + +func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserAddResponse), nil +} + +func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserDeleteResponse), nil +} + +func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserChangePasswordResponse), nil +} + +func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserGrantRoleResponse), nil +} + +func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserGetResponse), nil +} + +func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserListResponse), nil +} + +func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserRevokeRoleResponse), nil +} + +func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + resp, err := 
s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleAddResponse), nil +} + +func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleGrantPermissionResponse), nil +} + +func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleGetResponse), nil +} + +func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleListResponse), nil +} + +func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleRevokePermissionResponse), nil +} + +func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleDeleteResponse), nil +} + +func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + result, err := s.processInternalRaftRequestOnce(ctx, r) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp, nil +} + +func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + for { + resp, err := s.raftRequestOnce(ctx, r) + if err != auth.ErrAuthOldRevision { + return resp, err + } + } +} + +// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure. +func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { + for { + ai, err := s.AuthInfoFromCtx(ctx) + if err != nil { + return err + } + if ai == nil { + // chk expects non-nil AuthInfo; use empty credentials + ai = &auth.AuthInfo{} + } + if err = chk(ai); err != nil { + if err == auth.ErrAuthOldRevision { + continue + } + return err + } + // fetch response for serialized request + get() + // empty credentials or current auth info means no need to retry + if ai.Revision == 0 || ai.Revision == s.authStore.Revision() { + return nil + } + // avoid TOCTOU error, retry of the request is required. 
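+ // (Editorial note: the hazard here is check-then-act: permissions were + // checked against one auth store revision, but the serialized read may + // have executed after a later auth mutation. Looping re-runs both the + // permission check and the read under the current revision.)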
+ } +} + +func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { + ai := s.getAppliedIndex() + ci := s.getCommittedIndex() + if ci > ai+maxGapBetweenApplyAndCommitIndex { + return nil, ErrTooManyRequests + } + + r.Header = &pb.RequestHeader{ + ID: s.reqIDGen.Next(), + } + + authInfo, err := s.AuthInfoFromCtx(ctx) + if err != nil { + return nil, err + } + if authInfo != nil { + r.Header.Username = authInfo.Username + r.Header.AuthRevision = authInfo.Revision + } + + data, err := r.Marshal() + if err != nil { + return nil, err + } + + if len(data) > int(s.Cfg.MaxRequestBytes) { + return nil, ErrRequestTooLarge + } + + id := r.ID + if id == 0 { + id = r.Header.ID + } + ch := s.w.Register(id) + + cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) + defer cancel() + + start := time.Now() + s.r.Propose(cctx, data) + proposalsPending.Inc() + defer proposalsPending.Dec() + + select { + case x := <-ch: + return x.(*applyResult), nil + case <-cctx.Done(): + proposalsFailed.Inc() + s.w.Trigger(id, nil) // GC wait + return nil, s.parseProposeCtxErr(cctx.Err(), start) + case <-s.done: + return nil, ErrStopped + } +} + +// Watchable returns a watchable interface attached to the etcdserver. +func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } + +func (s *EtcdServer) linearizableReadLoop() { + var rs raft.ReadState + + for { + ctx := make([]byte, 8) + binary.BigEndian.PutUint64(ctx, s.reqIDGen.Next()) + + select { + case <-s.readwaitc: + case <-s.stopping: + return + } + + nextnr := newNotifier() + + s.readMu.Lock() + nr := s.readNotifier + s.readNotifier = nextnr + s.readMu.Unlock() + + cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) + if err := s.r.ReadIndex(cctx, ctx); err != nil { + cancel() + if err == raft.ErrStopped { + return + } + plog.Errorf("failed to get read index from raft: %v", err) + nr.notify(err) + continue + } + cancel() + + var ( + timeout bool + done bool + ) + for !timeout && !done { + select { + case rs = <-s.r.readStateC: + done = bytes.Equal(rs.RequestCtx, ctx) + if !done { + // a previous request might time out. now we should ignore the response of it and + // continue waiting for the response of the current requests. 
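+ // (Editorial note: each loop iteration tags ReadIndex with a fresh + // 8-byte id, so stale ReadStates left over from timed-out iterations + // are drained here until rs.RequestCtx matches the current id.)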
+ plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx) + } + case <-time.After(s.Cfg.ReqTimeout()): + plog.Warningf("timed out waiting for read index response") + nr.notify(ErrTimeout) + timeout = true + case <-s.stopping: + return + } + } + if !done { + continue + } + + if ai := s.getAppliedIndex(); ai < rs.Index { + select { + case <-s.applyWait.Wait(rs.Index): + case <-s.stopping: + return + } + } + // unblock all l-reads requested at indices before rs.Index + nr.notify(nil) + } +} + +func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { + s.readMu.RLock() + nc := s.readNotifier + s.readMu.RUnlock() + + // signal linearizable loop for current notify if it hasn't been already + select { + case s.readwaitc <- struct{}{}: + default: + } + + // wait for read state notification + select { + case <-nc.c: + return nc.err + case <-ctx.Done(): + return ctx.Err() + case <-s.done: + return ErrStopped + } +} + +func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) { + if s.Cfg.ClientCertAuthEnabled { + authInfo := s.AuthStore().AuthInfoFromTLS(ctx) + if authInfo != nil { + return authInfo, nil + } + } + + return s.AuthStore().AuthInfoFromCtx(ctx) +} diff --git a/vendor/github.com/coreos/etcd/lease/BUILD b/vendor/github.com/coreos/etcd/lease/BUILD new file mode 100644 index 000000000000..834ac6de3dd7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/lease/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "lessor.go", + ], + importpath = "github.com/coreos/etcd/lease", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/lease/leasepb:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/monotime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/lease/leasehttp:all-srcs", + "//vendor/github.com/coreos/etcd/lease/leasepb:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/lease/doc.go b/vendor/github.com/coreos/etcd/lease/doc.go new file mode 100644 index 000000000000..73e7d0ec51a2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/lease/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package lease provides an interface and implemetation for time-limited leases over arbitrary resources. 
+package lease diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD b/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD new file mode 100644 index 000000000000..1385cb46bf8b --- /dev/null +++ b/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "http.go", + ], + importpath = "github.com/coreos/etcd/lease/leasehttp", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/coreos/etcd/lease:go_default_library", + "//vendor/github.com/coreos/etcd/lease/leasepb:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go b/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go new file mode 100644 index 000000000000..8177a37b663d --- /dev/null +++ b/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package leasehttp serves lease renewals made through HTTP requests. +package leasehttp diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go new file mode 100644 index 000000000000..c3175cbbb0f0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go @@ -0,0 +1,242 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
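+// Editorial note: this file serves two POST endpoints, LeasePrefix +// ("/leases") for keepalive renewals and LeaseInternalPrefix +// ("/leases/internal") for TimeToLive lookups. Both carry protobuf request +// and response bodies; non-primary members use them to reach the leader's +// lessor.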
+ +package leasehttp + +import ( + "bytes" + "context" + "errors" + "fmt" + "io/ioutil" + "net/http" + "time" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/lease/leasepb" + "github.com/coreos/etcd/pkg/httputil" +) + +var ( + LeasePrefix = "/leases" + LeaseInternalPrefix = "/leases/internal" + applyTimeout = time.Second + ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out") +) + +// NewHandler returns an http Handler for lease renewals +func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler { + return &leaseHandler{l, waitch} +} + +type leaseHandler struct { + l lease.Lessor + waitch func() <-chan struct{} +} + +func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, "error reading body", http.StatusBadRequest) + return + } + + var v []byte + switch r.URL.Path { + case LeasePrefix: + lreq := pb.LeaseKeepAliveRequest{} + if err := lreq.Unmarshal(b); err != nil { + http.Error(w, "error unmarshalling request", http.StatusBadRequest) + return + } + select { + case <-h.waitch(): + case <-time.After(applyTimeout): + http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout) + return + } + ttl, err := h.l.Renew(lease.LeaseID(lreq.ID)) + if err != nil { + if err == lease.ErrLeaseNotFound { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + // TODO: fill out ResponseHeader + resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl} + v, err = resp.Marshal() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + case LeaseInternalPrefix: + lreq := leasepb.LeaseInternalRequest{} + if err := lreq.Unmarshal(b); err != nil { + http.Error(w, "error unmarshalling request", http.StatusBadRequest) + return + } + select { + case <-h.waitch(): + case <-time.After(applyTimeout): + http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout) + return + } + l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID)) + if l == nil { + http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound) + return + } + // TODO: fill out ResponseHeader + resp := &leasepb.LeaseInternalResponse{ + LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{ + Header: &pb.ResponseHeader{}, + ID: lreq.LeaseTimeToLiveRequest.ID, + TTL: int64(l.Remaining().Seconds()), + GrantedTTL: l.TTL(), + }, + } + if lreq.LeaseTimeToLiveRequest.Keys { + ks := l.Keys() + kbs := make([][]byte, len(ks)) + for i := range ks { + kbs[i] = []byte(ks[i]) + } + resp.LeaseTimeToLiveResponse.Keys = kbs + } + + v, err = resp.Marshal() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + default: + http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/protobuf") + w.Write(v) +} + +// RenewHTTP renews a lease at a given primary server. +// TODO: Batch request in future? 
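+// Example (editorial sketch, not part of the vendored file; leaderURL and +// peerRT are hypothetical): a member that is not the primary lessor could +// forward a keepalive with +// +// ttl, err := leasehttp.RenewHTTP(ctx, id, leaderURL+leasehttp.LeasePrefix, peerRT) +// +// EtcdServer.LeaseRenew in v3_server.go above is the real call site; it +// loops over the leader's PeerURLs.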
+func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundTripper) (int64, error) { + // will post lreq protobuf to leader + lreq, err := (&pb.LeaseKeepAliveRequest{ID: int64(id)}).Marshal() + if err != nil { + return -1, err + } + + cc := &http.Client{Transport: rt} + req, err := http.NewRequest("POST", url, bytes.NewReader(lreq)) + if err != nil { + return -1, err + } + req.Header.Set("Content-Type", "application/protobuf") + req.Cancel = ctx.Done() + + resp, err := cc.Do(req) + if err != nil { + return -1, err + } + b, err := readResponse(resp) + if err != nil { + return -1, err + } + + if resp.StatusCode == http.StatusRequestTimeout { + return -1, ErrLeaseHTTPTimeout + } + + if resp.StatusCode == http.StatusNotFound { + return -1, lease.ErrLeaseNotFound + } + + if resp.StatusCode != http.StatusOK { + return -1, fmt.Errorf("lease: unknown error(%s)", string(b)) + } + + lresp := &pb.LeaseKeepAliveResponse{} + if err := lresp.Unmarshal(b); err != nil { + return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b)) + } + if lresp.ID != int64(id) { + return -1, fmt.Errorf("lease: renew id mismatch") + } + return lresp.TTL, nil +} + +// TimeToLiveHTTP retrieves lease information of the given lease ID. +func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) { + // will post lreq protobuf to leader + lreq, err := (&leasepb.LeaseInternalRequest{&pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: keys}}).Marshal() + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", url, bytes.NewReader(lreq)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/protobuf") + + req = req.WithContext(ctx) + + cc := &http.Client{Transport: rt} + var b []byte + // send the request; the attached ctx cancels cc.Do if it expires + resp, err := cc.Do(req) + if err != nil { + return nil, err + } + b, err = readResponse(resp) + if err != nil { + return nil, err + } + if resp.StatusCode == http.StatusRequestTimeout { + return nil, ErrLeaseHTTPTimeout + } + if resp.StatusCode == http.StatusNotFound { + return nil, lease.ErrLeaseNotFound + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("lease: unknown error(%s)", string(b)) + } + + lresp := &leasepb.LeaseInternalResponse{} + if err := lresp.Unmarshal(b); err != nil { + return nil, fmt.Errorf(`lease: %v.
data = "%s"`, err, string(b)) + } + if lresp.LeaseTimeToLiveResponse.ID != int64(id) { + return nil, fmt.Errorf("lease: renew id mismatch") + } + return lresp, nil +} + +func readResponse(resp *http.Response) (b []byte, err error) { + b, err = ioutil.ReadAll(resp.Body) + httputil.GracefulClose(resp) + return +} diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/BUILD b/vendor/github.com/coreos/etcd/lease/leasepb/BUILD new file mode 100644 index 000000000000..d778728663d5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/lease/leasepb/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["lease.pb.go"], + importpath = "github.com/coreos/etcd/lease/leasepb", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go new file mode 100644 index 000000000000..ec8db732be57 --- /dev/null +++ b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go @@ -0,0 +1,608 @@ +// Code generated by protoc-gen-gogo. +// source: lease.proto +// DO NOT EDIT! + +/* + Package leasepb is a generated protocol buffer package. + + It is generated from these files: + lease.proto + + It has these top-level messages: + Lease + LeaseInternalRequest + LeaseInternalResponse +*/ +package leasepb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Lease struct { + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"` +} + +func (m *Lease) Reset() { *m = Lease{} } +func (m *Lease) String() string { return proto.CompactTextString(m) } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{0} } + +type LeaseInternalRequest struct { + LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest" json:"LeaseTimeToLiveRequest,omitempty"` +} + +func (m *LeaseInternalRequest) Reset() { *m = LeaseInternalRequest{} } +func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseInternalRequest) ProtoMessage() {} +func (*LeaseInternalRequest) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{1} } + +type LeaseInternalResponse struct { + LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse" json:"LeaseTimeToLiveResponse,omitempty"` +} + +func (m *LeaseInternalResponse) Reset() { *m = LeaseInternalResponse{} } +func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseInternalResponse) ProtoMessage() {} +func (*LeaseInternalResponse) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{2} } + +func init() { + proto.RegisterType((*Lease)(nil), "leasepb.Lease") + proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest") + proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse") +} +func (m *Lease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Lease) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintLease(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintLease(dAtA, i, uint64(m.TTL)) + } + return i, nil +} + +func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseInternalRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LeaseTimeToLiveRequest != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveRequest.Size())) + n1, err := m.LeaseTimeToLiveRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseInternalResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LeaseTimeToLiveResponse != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveResponse.Size())) + n2, err := m.LeaseTimeToLiveResponse.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeFixed64Lease(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + 
dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Lease(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintLease(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Lease) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovLease(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovLease(uint64(m.TTL)) + } + return n +} + +func (m *LeaseInternalRequest) Size() (n int) { + var l int + _ = l + if m.LeaseTimeToLiveRequest != nil { + l = m.LeaseTimeToLiveRequest.Size() + n += 1 + l + sovLease(uint64(l)) + } + return n +} + +func (m *LeaseInternalResponse) Size() (n int) { + var l int + _ = l + if m.LeaseTimeToLiveResponse != nil { + l = m.LeaseTimeToLiveResponse.Size() + n += 1 + l + sovLease(uint64(l)) + } + return n +} + +func sovLease(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozLease(x uint64) (n int) { + return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Lease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseInternalRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseTimeToLiveRequest == nil { + m.LeaseTimeToLiveRequest = &etcdserverpb.LeaseTimeToLiveRequest{} + } + if err := m.LeaseTimeToLiveRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseInternalResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseTimeToLiveResponse == nil { + m.LeaseTimeToLiveResponse = &etcdserverpb.LeaseTimeToLiveResponse{} + } + if err := m.LeaseTimeToLiveResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLease(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLease + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLease + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLease + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthLease + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLease + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipLease(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLease = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("lease.proto", fileDescriptorLease) } + +var fileDescriptorLease = []byte{ + // 233 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, + 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2, + 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45, + 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92, + 0x21, 0xea, 0x94, 0x34, 0xb9, 0x58, 0x7d, 0x40, 0x06, 0x09, 0xf1, 0x71, 0x31, 0x79, 0xba, 0x48, + 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x31, 0x79, 0xba, 0x08, 0x09, 0x70, 0x31, 0x87, 0x84, 0xf8, + 0x48, 0x30, 0x81, 0x05, 0x40, 0x4c, 0xa5, 0x12, 0x2e, 0x11, 0xb0, 0x52, 0xcf, 0xbc, 0x92, 0xd4, + 0xa2, 0xbc, 0xc4, 0x9c, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0xa1, 0x18, 0x2e, 0x31, 0xb0, + 0x78, 0x48, 0x66, 0x6e, 0x6a, 0x48, 0xbe, 0x4f, 0x66, 0x59, 0x2a, 0x54, 0x06, 0x6c, 0x1a, 0xb7, + 0x91, 0x8a, 0x1e, 0xb2, 0xdd, 0x7a, 0xd8, 0xd5, 0x06, 0xe1, 0x30, 0x43, 0xa9, 0x82, 0x4b, 0x14, + 0xcd, 0xd6, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa1, 0x78, 0x2e, 0x71, 0x0c, 0x2d, 0x10, 0x29, + 0xa8, 0xbd, 0xaa, 0x04, 0xec, 0x85, 0x28, 0x0e, 0xc2, 0x65, 0x8a, 0x93, 0xc4, 0x89, 0x87, 0x72, + 0x0c, 0x17, 0x1e, 0xca, 0x31, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, + 0x72, 0x8c, 0x33, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xc3, 0xce, 0x18, 0x10, 0x00, 0x00, 0xff, + 0xff, 0x9f, 0xf2, 0x42, 0xe0, 0x91, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto b/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto new file mode 100644 index 000000000000..be414b993ed9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; +package leasepb; + +import "gogoproto/gogo.proto"; +import "etcd/etcdserver/etcdserverpb/rpc.proto"; + +option (gogoproto.marshaler_all) = true; +option 
(gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +message Lease { + int64 ID = 1; + int64 TTL = 2; +} + +message LeaseInternalRequest { + etcdserverpb.LeaseTimeToLiveRequest LeaseTimeToLiveRequest = 1; +} + +message LeaseInternalResponse { + etcdserverpb.LeaseTimeToLiveResponse LeaseTimeToLiveResponse = 1; +} diff --git a/vendor/github.com/coreos/etcd/lease/lessor.go b/vendor/github.com/coreos/etcd/lease/lessor.go new file mode 100644 index 000000000000..3418cf565edb --- /dev/null +++ b/vendor/github.com/coreos/etcd/lease/lessor.go @@ -0,0 +1,643 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lease + +import ( + "encoding/binary" + "errors" + "math" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/coreos/etcd/lease/leasepb" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/monotime" +) + +const ( + // NoLease is a special LeaseID representing the absence of a lease. + NoLease = LeaseID(0) + + forever = monotime.Time(math.MaxInt64) +) + +var ( + leaseBucketName = []byte("lease") + + // maximum number of leases to revoke per second; configurable for tests + leaseRevokeRate = 1000 + + ErrNotPrimary = errors.New("not a primary lessor") + ErrLeaseNotFound = errors.New("lease not found") + ErrLeaseExists = errors.New("lease already exists") +) + +// TxnDelete is a TxnWrite that only permits deletes. Defined here +// to avoid circular dependency with mvcc. +type TxnDelete interface { + DeleteRange(key, end []byte) (n, rev int64) + End() +} + +// RangeDeleter is a TxnDelete constructor. +type RangeDeleter func() TxnDelete + +type LeaseID int64 + +// Lessor owns leases. It can grant, revoke, renew and modify leases for lessee. +type Lessor interface { + // SetRangeDeleter lets the lessor create TxnDeletes to the store. + // Lessor deletes the items in the revoked or expired lease by creating + // new TxnDeletes. + SetRangeDeleter(rd RangeDeleter) + + // Grant grants a lease that expires at least after TTL seconds. + Grant(id LeaseID, ttl int64) (*Lease, error) + // Revoke revokes a lease with given ID. The item attached to the + // given lease will be removed. If the ID does not exist, an error + // will be returned. + Revoke(id LeaseID) error + + // Attach attaches given leaseItem to the lease with given LeaseID. + // If the lease does not exist, an error will be returned. + Attach(id LeaseID, items []LeaseItem) error + + // GetLease returns LeaseID for given item. + // If no lease found, NoLease value will be returned. + GetLease(item LeaseItem) LeaseID + + // Detach detaches given leaseItem from the lease with given LeaseID. + // If the lease does not exist, an error will be returned. + Detach(id LeaseID, items []LeaseItem) error + + // Promote promotes the lessor to be the primary lessor. 
Primary lessor manages + // the expiration and renewal of leases. + // A newly promoted lessor renews the TTL of all leases to the extend + // duration plus the previous TTL. + Promote(extend time.Duration) + + // Demote demotes the lessor from being the primary lessor. + Demote() + + // Renew renews a lease with given ID. It returns the renewed TTL. If the ID does not exist, + // an error will be returned. + Renew(id LeaseID) (int64, error) + + // Lookup gives the lease at a given lease id, if any + Lookup(id LeaseID) *Lease + + // ExpiredLeasesC returns a chan that is used to receive expired leases. + ExpiredLeasesC() <-chan []*Lease + + // Recover recovers the lessor state from the given backend and RangeDeleter. + Recover(b backend.Backend, rd RangeDeleter) + + // Stop stops the lessor from managing leases. The behavior of calling Stop multiple + // times is undefined. + Stop() +} + +// lessor implements the Lessor interface. +// TODO: use clockwork for testability. +type lessor struct { + mu sync.Mutex + + // demotec is set when the lessor is the primary. + // demotec will be closed if the lessor is demoted. + demotec chan struct{} + + // TODO: probably this should be a heap with a secondary + // id index. + // Now it is O(N) to loop over the leases to find expired ones. + // We want to make Grant, Revoke, and findExpiredLeases all O(logN) and + // Renew O(1). + // findExpiredLeases and Renew should be the most frequent operations. + leaseMap map[LeaseID]*Lease + + itemMap map[LeaseItem]LeaseID + + // When a lease expires, the lessor will delete the + // leased range (or key) by the RangeDeleter. + rd RangeDeleter + + // backend to persist leases. We only persist lease ID and expiry for now. + // The leased items can be recovered by iterating all the keys in kv. + b backend.Backend + + // minLeaseTTL is the minimum lease TTL that can be granted for a lease. Any + // requests for shorter TTLs are extended to the minimum TTL. + minLeaseTTL int64 + + expiredC chan []*Lease + // stopC is a channel whose closure indicates that the lessor should be stopped. + stopC chan struct{} + // doneC is a channel whose closure indicates that the lessor is stopped. + doneC chan struct{} +} + +func NewLessor(b backend.Backend, minLeaseTTL int64) Lessor { + return newLessor(b, minLeaseTTL) +} + +func newLessor(b backend.Backend, minLeaseTTL int64) *lessor { + l := &lessor{ + leaseMap: make(map[LeaseID]*Lease), + itemMap: make(map[LeaseItem]LeaseID), + b: b, + minLeaseTTL: minLeaseTTL, + // expiredC is a small buffered chan to avoid unnecessary blocking. + expiredC: make(chan []*Lease, 16), + stopC: make(chan struct{}), + doneC: make(chan struct{}), + } + l.initAndRecover() + + go l.runLoop() + + return l +} + +// isPrimary indicates if this lessor is the primary lessor. The primary +// lessor manages lease expiration and renewal. +// +// In etcd, the raft leader is the primary. Thus there might be two primary +// lessors at the same time (raft allows concurrent leaders, but with different +// terms) for at most a leader election timeout. +// The old primary leader cannot affect the correctness since its proposal has a +// smaller term and will not be committed. +// +// TODO: raft followers do not forward lease management proposals. There might be a +// very small window (normally within a second, depending on Go scheduling) in which +// a raft follower is the primary between the raft leader demotion and the lessor +// demotion. Usually this should not be a problem. Leases should not be that +// sensitive to timing.
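+// +// Editorial sketch of the intended lifecycle (variable names hypothetical): +// +// le := lease.NewLessor(be, minLeaseTTL) // non-primary: expiries pinned to "forever" +// le.Promote(extend) // on gaining raft leadership: leases start expiring +// le.Demote() // on losing leadership: pin expiries to "forever" again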
+func (le *lessor) isPrimary() bool { + return le.demotec != nil +} + +func (le *lessor) SetRangeDeleter(rd RangeDeleter) { + le.mu.Lock() + defer le.mu.Unlock() + + le.rd = rd +} + +func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) { + if id == NoLease { + return nil, ErrLeaseNotFound + } + + // TODO: when lessor is under high load, it should give out leases + // with longer TTLs to reduce renew load. + l := &Lease{ + ID: id, + ttl: ttl, + itemSet: make(map[LeaseItem]struct{}), + revokec: make(chan struct{}), + } + + le.mu.Lock() + defer le.mu.Unlock() + + if _, ok := le.leaseMap[id]; ok { + return nil, ErrLeaseExists + } + + if l.ttl < le.minLeaseTTL { + l.ttl = le.minLeaseTTL + } + + if le.isPrimary() { + l.refresh(0) + } else { + l.forever() + } + + le.leaseMap[id] = l + l.persistTo(le.b) + + return l, nil +} + +func (le *lessor) Revoke(id LeaseID) error { + le.mu.Lock() + + l := le.leaseMap[id] + if l == nil { + le.mu.Unlock() + return ErrLeaseNotFound + } + defer close(l.revokec) + // unlock before doing external work + le.mu.Unlock() + + if le.rd == nil { + return nil + } + + txn := le.rd() + + // sort keys so deletes are in the same order among all members, + // otherwise the backend hashes will be different + keys := l.Keys() + sort.StringSlice(keys).Sort() + for _, key := range keys { + txn.DeleteRange([]byte(key), nil) + } + + le.mu.Lock() + defer le.mu.Unlock() + delete(le.leaseMap, l.ID) + // lease deletion needs to be in the same backend transaction as the + // kv deletion; otherwise we might end up not executing the revoke or not + // deleting the keys if etcdserver fails in between. + le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID))) + + txn.End() + return nil +} + +// Renew renews an existing lease. If the given lease does not exist or +// has expired, an error will be returned. +func (le *lessor) Renew(id LeaseID) (int64, error) { + le.mu.Lock() + + unlock := func() { le.mu.Unlock() } + defer func() { unlock() }() + + if !le.isPrimary() { + // forward renew request to primary instead of returning error. + return -1, ErrNotPrimary + } + + demotec := le.demotec + + l := le.leaseMap[id] + if l == nil { + return -1, ErrLeaseNotFound + } + + if l.expired() { + le.mu.Unlock() + unlock = func() {} + select { + // An expired lease might be pending revocation or going through + // quorum to be revoked. To be accurate, the renew request must wait for the + // deletion to complete. + case <-l.revokec: + return -1, ErrLeaseNotFound + // The expired lease might fail to be revoked if the primary changes. + // The caller will retry on ErrNotPrimary. + case <-demotec: + return -1, ErrNotPrimary + case <-le.stopC: + return -1, ErrNotPrimary + } + } + + l.refresh(0) + return l.ttl, nil +} + +func (le *lessor) Lookup(id LeaseID) *Lease { + le.mu.Lock() + defer le.mu.Unlock() + return le.leaseMap[id] +} + +func (le *lessor) Promote(extend time.Duration) { + le.mu.Lock() + defer le.mu.Unlock() + + le.demotec = make(chan struct{}) + + // refresh the expiries of all leases.
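+ // (Editorial note: below, when at least leaseRevokeRate leases could + // pile up in the same one-second window, their expiries are spread out + // so at most about 3/4 of leaseRevokeRate expire per second, which keeps + // revokes under runLoop's drain limit of leaseRevokeRate/2 per 500ms.)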
+ for _, l := range le.leaseMap { + l.refresh(extend) + } + + if len(le.leaseMap) < leaseRevokeRate { + // no possibility of lease pile-up + return + } + + // adjust expiries in case of overlap + leases := make([]*Lease, 0, len(le.leaseMap)) + for _, l := range le.leaseMap { + leases = append(leases, l) + } + sort.Sort(leasesByExpiry(leases)) + + baseWindow := leases[0].Remaining() + nextWindow := baseWindow + time.Second + expires := 0 + // have fewer expires than the total revoke rate so piled up leases + // don't consume the entire revoke limit + targetExpiresPerSecond := (3 * leaseRevokeRate) / 4 + for _, l := range leases { + remaining := l.Remaining() + if remaining > nextWindow { + baseWindow = remaining + nextWindow = baseWindow + time.Second + expires = 1 + continue + } + expires++ + if expires <= targetExpiresPerSecond { + continue + } + rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond)) + // If leases are extended by n seconds, leases n seconds ahead of the + // base window should be extended by only one second. + rateDelay -= float64(remaining - baseWindow) + delay := time.Duration(rateDelay) + nextWindow = baseWindow + delay + l.refresh(delay + extend) + } +} + +type leasesByExpiry []*Lease + +func (le leasesByExpiry) Len() int { return len(le) } +func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() } +func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] } + +func (le *lessor) Demote() { + le.mu.Lock() + defer le.mu.Unlock() + + // set the expiries of all leases to forever + for _, l := range le.leaseMap { + l.forever() + } + + if le.demotec != nil { + close(le.demotec) + le.demotec = nil + } +} + +// Attach attaches items to the lease with given ID. When the lease +// expires, the attached items will be automatically removed. +// If the given lease does not exist, an error will be returned. +func (le *lessor) Attach(id LeaseID, items []LeaseItem) error { + le.mu.Lock() + defer le.mu.Unlock() + + l := le.leaseMap[id] + if l == nil { + return ErrLeaseNotFound + } + + l.mu.Lock() + for _, it := range items { + l.itemSet[it] = struct{}{} + le.itemMap[it] = id + } + l.mu.Unlock() + return nil +} + +func (le *lessor) GetLease(item LeaseItem) LeaseID { + le.mu.Lock() + id := le.itemMap[item] + le.mu.Unlock() + return id +} + +// Detach detaches items from the lease with given ID. +// If the given lease does not exist, an error will be returned. 
+func (le *lessor) Detach(id LeaseID, items []LeaseItem) error { + le.mu.Lock() + defer le.mu.Unlock() + + l := le.leaseMap[id] + if l == nil { + return ErrLeaseNotFound + } + + l.mu.Lock() + for _, it := range items { + delete(l.itemSet, it) + delete(le.itemMap, it) + } + l.mu.Unlock() + return nil +} + +func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) { + le.mu.Lock() + defer le.mu.Unlock() + + le.b = b + le.rd = rd + le.leaseMap = make(map[LeaseID]*Lease) + le.itemMap = make(map[LeaseItem]LeaseID) + le.initAndRecover() +} + +func (le *lessor) ExpiredLeasesC() <-chan []*Lease { + return le.expiredC +} + +func (le *lessor) Stop() { + close(le.stopC) + <-le.doneC +} + +func (le *lessor) runLoop() { + defer close(le.doneC) + + for { + var ls []*Lease + + le.mu.Lock() + if le.isPrimary() { + ls = le.findExpiredLeases() + } + le.mu.Unlock() + + if len(ls) != 0 { + // rate limit + if len(ls) > leaseRevokeRate/2 { + ls = ls[:leaseRevokeRate/2] + } + select { + case <-le.stopC: + return + case le.expiredC <- ls: + default: + // the receiver of expiredC is probably busy handling + // other stuff + // let's try this next time after 500ms + } + } + + select { + case <-time.After(500 * time.Millisecond): + case <-le.stopC: + return + } + } +} + +// findExpiredLeases loops all the leases in the leaseMap and returns the expired +// leases that needed to be revoked. +func (le *lessor) findExpiredLeases() []*Lease { + leases := make([]*Lease, 0, 16) + + for _, l := range le.leaseMap { + // TODO: probably should change to <= 100-500 millisecond to + // make up committing latency. + if l.expired() { + leases = append(leases, l) + } + } + + return leases +} + +func (le *lessor) initAndRecover() { + tx := le.b.BatchTx() + tx.Lock() + + tx.UnsafeCreateBucket(leaseBucketName) + _, vs := tx.UnsafeRange(leaseBucketName, int64ToBytes(0), int64ToBytes(math.MaxInt64), 0) + // TODO: copy vs and do decoding outside tx lock if lock contention becomes an issue. + for i := range vs { + var lpb leasepb.Lease + err := lpb.Unmarshal(vs[i]) + if err != nil { + tx.Unlock() + panic("failed to unmarshal lease proto item") + } + ID := LeaseID(lpb.ID) + if lpb.TTL < le.minLeaseTTL { + lpb.TTL = le.minLeaseTTL + } + le.leaseMap[ID] = &Lease{ + ID: ID, + ttl: lpb.TTL, + // itemSet will be filled in when recover key-value pairs + // set expiry to forever, refresh when promoted + itemSet: make(map[LeaseItem]struct{}), + expiry: forever, + revokec: make(chan struct{}), + } + } + tx.Unlock() + + le.b.ForceCommit() +} + +type Lease struct { + ID LeaseID + ttl int64 // time to live in seconds + // expiry is time when lease should expire; must be 64-bit aligned. + expiry monotime.Time + + // mu protects concurrent accesses to itemSet + mu sync.RWMutex + itemSet map[LeaseItem]struct{} + revokec chan struct{} +} + +func (l *Lease) expired() bool { + return l.Remaining() <= 0 +} + +func (l *Lease) persistTo(b backend.Backend) { + key := int64ToBytes(int64(l.ID)) + + lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.ttl)} + val, err := lpb.Marshal() + if err != nil { + panic("failed to marshal lease proto item") + } + + b.BatchTx().Lock() + b.BatchTx().UnsafePut(leaseBucketName, key, val) + b.BatchTx().Unlock() +} + +// TTL returns the TTL of the Lease. +func (l *Lease) TTL() int64 { + return l.ttl +} + +// refresh refreshes the expiry of the lease. 
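+// (Editorial note, inferred from the surrounding code: refresh stores the
+// new deadline with an atomic write because Remaining reads expiry with
+// atomic.LoadUint64; holders of a *Lease, e.g. obtained from Lookup, may
+// query Remaining concurrently with the lessor refreshing the lease. This
+// is also why the Lease.expiry field must be 64-bit aligned.)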
+func (l *Lease) refresh(extend time.Duration) { + t := monotime.Now().Add(extend + time.Duration(l.ttl)*time.Second) + atomic.StoreUint64((*uint64)(&l.expiry), uint64(t)) +} + +// forever sets the expiry of lease to be forever. +func (l *Lease) forever() { atomic.StoreUint64((*uint64)(&l.expiry), uint64(forever)) } + +// Keys returns all the keys attached to the lease. +func (l *Lease) Keys() []string { + l.mu.RLock() + keys := make([]string, 0, len(l.itemSet)) + for k := range l.itemSet { + keys = append(keys, k.Key) + } + l.mu.RUnlock() + return keys +} + +// Remaining returns the remaining time of the lease. +func (l *Lease) Remaining() time.Duration { + t := monotime.Time(atomic.LoadUint64((*uint64)(&l.expiry))) + return time.Duration(t - monotime.Now()) +} + +type LeaseItem struct { + Key string +} + +func int64ToBytes(n int64) []byte { + bytes := make([]byte, 8) + binary.BigEndian.PutUint64(bytes, uint64(n)) + return bytes +} + +// FakeLessor is a fake implementation of Lessor interface. +// Used for testing only. +type FakeLessor struct{} + +func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {} + +func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil } + +func (fl *FakeLessor) Revoke(id LeaseID) error { return nil } + +func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil } + +func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 } +func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil } + +func (fl *FakeLessor) Promote(extend time.Duration) {} + +func (fl *FakeLessor) Demote() {} + +func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil } + +func (le *FakeLessor) Lookup(id LeaseID) *Lease { return nil } + +func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil } + +func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {} + +func (fl *FakeLessor) Stop() {} diff --git a/vendor/github.com/coreos/etcd/mvcc/BUILD b/vendor/github.com/coreos/etcd/mvcc/BUILD new file mode 100644 index 000000000000..21b837ec9ac4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/BUILD @@ -0,0 +1,54 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "index.go", + "key_index.go", + "kv.go", + "kv_view.go", + "kvstore.go", + "kvstore_compaction.go", + "kvstore_txn.go", + "metrics.go", + "metrics_txn.go", + "revision.go", + "util.go", + "watchable_store.go", + "watchable_store_txn.go", + "watcher.go", + "watcher_group.go", + ], + importpath = "github.com/coreos/etcd/mvcc", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/lease:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/adt:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/schedule:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/google/btree:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + 
":package-srcs", + "//vendor/github.com/coreos/etcd/mvcc/backend:all-srcs", + "//vendor/github.com/coreos/etcd/mvcc/mvccpb:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/BUILD b/vendor/github.com/coreos/etcd/mvcc/backend/BUILD new file mode 100644 index 000000000000..5cc8cced8562 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/BUILD @@ -0,0 +1,69 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "backend.go", + "batch_tx.go", + "doc.go", + "metrics.go", + "read_tx.go", + "tx_buffer.go", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "config_default.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "config_default.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "config_default.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "config_default.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "config_linux.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "config_default.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "config_default.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "config_default.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "config_default.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "config_default.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "config_windows.go", + ], + "//conditions:default": [], + }), + importpath = "github.com/coreos/etcd/mvcc/backend", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/bbolt:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go new file mode 100644 index 000000000000..9d053337f78a --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go @@ -0,0 +1,443 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" + + bolt "github.com/coreos/bbolt" + "github.com/coreos/pkg/capnslog" +) + +var ( + defaultBatchLimit = 10000 + defaultBatchInterval = 100 * time.Millisecond + + defragLimit = 10000 + + // initialMmapSize is the initial size of the mmapped region. 
Setting this larger than + // the potential max db size can prevent writer from blocking reader. + // This only works for linux. + initialMmapSize = uint64(10 * 1024 * 1024 * 1024) + + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend") + + // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning. + minSnapshotWarningTimeout = time.Duration(30 * time.Second) +) + +type Backend interface { + ReadTx() ReadTx + BatchTx() BatchTx + + Snapshot() Snapshot + Hash(ignores map[IgnoreKey]struct{}) (uint32, error) + // Size returns the current size of the backend. + Size() int64 + Defrag() error + ForceCommit() + Close() error +} + +type Snapshot interface { + // Size gets the size of the snapshot. + Size() int64 + // WriteTo writes the snapshot into the given writer. + WriteTo(w io.Writer) (n int64, err error) + // Close closes the snapshot. + Close() error +} + +type backend struct { + // size and commits are used with atomic operations so they must be + // 64-bit aligned, otherwise 32-bit tests will crash + + // size is the number of bytes in the backend + size int64 + // commits counts number of commits since start + commits int64 + + mu sync.RWMutex + db *bolt.DB + + batchInterval time.Duration + batchLimit int + batchTx *batchTxBuffered + + readTx *readTx + + stopc chan struct{} + donec chan struct{} +} + +type BackendConfig struct { + // Path is the file path to the backend file. + Path string + // BatchInterval is the maximum time before flushing the BatchTx. + BatchInterval time.Duration + // BatchLimit is the maximum puts before flushing the BatchTx. + BatchLimit int + // MmapSize is the number of bytes to mmap for the backend. + MmapSize uint64 +} + +func DefaultBackendConfig() BackendConfig { + return BackendConfig{ + BatchInterval: defaultBatchInterval, + BatchLimit: defaultBatchLimit, + MmapSize: initialMmapSize, + } +} + +func New(bcfg BackendConfig) Backend { + return newBackend(bcfg) +} + +func NewDefaultBackend(path string) Backend { + bcfg := DefaultBackendConfig() + bcfg.Path = path + return newBackend(bcfg) +} + +func newBackend(bcfg BackendConfig) *backend { + bopts := &bolt.Options{} + if boltOpenOptions != nil { + *bopts = *boltOpenOptions + } + bopts.InitialMmapSize = bcfg.mmapSize() + + db, err := bolt.Open(bcfg.Path, 0600, bopts) + if err != nil { + plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err) + } + + // In future, may want to make buffering optional for low-concurrency systems + // or dynamically swap between buffered/non-buffered depending on workload. + b := &backend{ + db: db, + + batchInterval: bcfg.BatchInterval, + batchLimit: bcfg.BatchLimit, + + readTx: &readTx{buf: txReadBuffer{ + txBuffer: txBuffer{make(map[string]*bucketBuffer)}}, + }, + + stopc: make(chan struct{}), + donec: make(chan struct{}), + } + b.batchTx = newBatchTxBuffered(b) + go b.run() + return b +} + +// BatchTx returns the current batch tx in coalescer. The tx can be used for read and +// write operations. The write result can be retrieved within the same tx immediately. +// The write result is isolated with other txs until the current one get committed. +func (b *backend) BatchTx() BatchTx { + return b.batchTx +} + +func (b *backend) ReadTx() ReadTx { return b.readTx } + +// ForceCommit forces the current batching tx to commit. 
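+// Commits normally happen lazily: run() flushes every batchInterval, and
+// the batch tx flushes on Unlock once pending puts reach batchLimit.
+// ForceCommit is for callers that need pending writes made durable
+// immediately; for example, the lessor calls it after rewriting the lease
+// bucket in initAndRecover. (Editorial note, inferred from the code shown.)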
+func (b *backend) ForceCommit() {
+	b.batchTx.Commit()
+}
+
+func (b *backend) Snapshot() Snapshot {
+	b.batchTx.Commit()
+
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	tx, err := b.db.Begin(false)
+	if err != nil {
+		plog.Fatalf("cannot begin tx (%s)", err)
+	}
+
+	stopc, donec := make(chan struct{}), make(chan struct{})
+	dbBytes := tx.Size()
+	go func() {
+		defer close(donec)
+		// sendRateBytes is based on transferring snapshot data over a
+		// 1 gigabit/s connection, assuming a minimum TCP throughput of 100MB/s.
+		var sendRateBytes int64 = 100 * 1024 * 1024
+		warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
+		if warningTimeout < minSnapshotWarningTimeout {
+			warningTimeout = minSnapshotWarningTimeout
+		}
+		start := time.Now()
+		ticker := time.NewTicker(warningTimeout)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ticker.C:
+				plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1024), start)
+			case <-stopc:
+				snapshotDurations.Observe(time.Since(start).Seconds())
+				return
+			}
+		}
+	}()
+
+	return &snapshot{tx, stopc, donec}
+}
+
+type IgnoreKey struct {
+	Bucket string
+	Key    string
+}
+
+func (b *backend) Hash(ignores map[IgnoreKey]struct{}) (uint32, error) {
+	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	err := b.db.View(func(tx *bolt.Tx) error {
+		c := tx.Cursor()
+		for next, _ := c.First(); next != nil; next, _ = c.Next() {
+			b := tx.Bucket(next)
+			if b == nil {
+				return fmt.Errorf("cannot get hash of bucket %s", string(next))
+			}
+			h.Write(next)
+			b.ForEach(func(k, v []byte) error {
+				bk := IgnoreKey{Bucket: string(next), Key: string(k)}
+				if _, ok := ignores[bk]; !ok {
+					h.Write(k)
+					h.Write(v)
+				}
+				return nil
+			})
+		}
+		return nil
+	})
+
+	if err != nil {
+		return 0, err
+	}
+
+	return h.Sum32(), nil
+}
+
+func (b *backend) Size() int64 {
+	return atomic.LoadInt64(&b.size)
+}
+
+func (b *backend) run() {
+	defer close(b.donec)
+	t := time.NewTimer(b.batchInterval)
+	defer t.Stop()
+	for {
+		select {
+		case <-t.C:
+		case <-b.stopc:
+			b.batchTx.CommitAndStop()
+			return
+		}
+		b.batchTx.Commit()
+		t.Reset(b.batchInterval)
+	}
+}
+
+func (b *backend) Close() error {
+	close(b.stopc)
+	<-b.donec
+	return b.db.Close()
+}
+
+// Commits returns the total number of commits since start.
+func (b *backend) Commits() int64 {
+	return atomic.LoadInt64(&b.commits)
+}
+
+func (b *backend) Defrag() error {
+	err := b.defrag()
+	if err != nil {
+		return err
+	}
+
+	// commit to update metadata like db.size
+	b.batchTx.Commit()
+
+	return nil
+}
+
+func (b *backend) defrag() error {
+	// TODO: make this non-blocking?
+	// lock batchTx to ensure nobody is using the previous tx, and then
+	// close the previous ongoing tx.
+	b.batchTx.Lock()
+	defer b.batchTx.Unlock()
+
+	// lock the database after locking the tx to avoid deadlock.
+ b.mu.Lock() + defer b.mu.Unlock() + + // block concurrent read requests while resetting tx + b.readTx.mu.Lock() + defer b.readTx.mu.Unlock() + + b.batchTx.unsafeCommit(true) + b.batchTx.tx = nil + + tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions) + if err != nil { + return err + } + + err = defragdb(b.db, tmpdb, defragLimit) + + if err != nil { + tmpdb.Close() + os.RemoveAll(tmpdb.Path()) + return err + } + + dbp := b.db.Path() + tdbp := tmpdb.Path() + + err = b.db.Close() + if err != nil { + plog.Fatalf("cannot close database (%s)", err) + } + err = tmpdb.Close() + if err != nil { + plog.Fatalf("cannot close database (%s)", err) + } + err = os.Rename(tdbp, dbp) + if err != nil { + plog.Fatalf("cannot rename database (%s)", err) + } + + b.db, err = bolt.Open(dbp, 0600, boltOpenOptions) + if err != nil { + plog.Panicf("cannot open database at %s (%v)", dbp, err) + } + b.batchTx.tx, err = b.db.Begin(true) + if err != nil { + plog.Fatalf("cannot begin tx (%s)", err) + } + + b.readTx.buf.reset() + b.readTx.tx = b.unsafeBegin(false) + atomic.StoreInt64(&b.size, b.readTx.tx.Size()) + + return nil +} + +func defragdb(odb, tmpdb *bolt.DB, limit int) error { + // open a tx on tmpdb for writes + tmptx, err := tmpdb.Begin(true) + if err != nil { + return err + } + + // open a tx on old db for read + tx, err := odb.Begin(false) + if err != nil { + return err + } + defer tx.Rollback() + + c := tx.Cursor() + + count := 0 + for next, _ := c.First(); next != nil; next, _ = c.Next() { + b := tx.Bucket(next) + if b == nil { + return fmt.Errorf("backend: cannot defrag bucket %s", string(next)) + } + + tmpb, berr := tmptx.CreateBucketIfNotExists(next) + if berr != nil { + return berr + } + tmpb.FillPercent = 0.9 // for seq write in for each + + b.ForEach(func(k, v []byte) error { + count++ + if count > limit { + err = tmptx.Commit() + if err != nil { + return err + } + tmptx, err = tmpdb.Begin(true) + if err != nil { + return err + } + tmpb = tmptx.Bucket(next) + tmpb.FillPercent = 0.9 // for seq write in for each + + count = 0 + } + return tmpb.Put(k, v) + }) + } + + return tmptx.Commit() +} + +func (b *backend) begin(write bool) *bolt.Tx { + b.mu.RLock() + tx := b.unsafeBegin(write) + b.mu.RUnlock() + atomic.StoreInt64(&b.size, tx.Size()) + return tx +} + +func (b *backend) unsafeBegin(write bool) *bolt.Tx { + tx, err := b.db.Begin(write) + if err != nil { + plog.Fatalf("cannot begin tx (%s)", err) + } + return tx +} + +// NewTmpBackend creates a backend implementation for testing. 
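+// A typical test might use it as follows (illustrative sketch, not upstream
+// code; error handling elided):
+//
+//	b, tmpPath := NewTmpBackend(10*time.Millisecond, 100)
+//	defer func() {
+//		b.Close()
+//		os.RemoveAll(filepath.Dir(tmpPath))
+//	}()
+//	b.BatchTx().Lock()
+//	b.BatchTx().UnsafeCreateBucket([]byte("test"))
+//	b.BatchTx().Unlock()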
+func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) { + dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test") + if err != nil { + plog.Fatal(err) + } + tmpPath := filepath.Join(dir, "database") + bcfg := DefaultBackendConfig() + bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit + return newBackend(bcfg), tmpPath +} + +func NewDefaultTmpBackend() (*backend, string) { + return NewTmpBackend(defaultBatchInterval, defaultBatchLimit) +} + +type snapshot struct { + *bolt.Tx + stopc chan struct{} + donec chan struct{} +} + +func (s *snapshot) Close() error { + close(s.stopc) + <-s.donec + return s.Tx.Rollback() +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go new file mode 100644 index 000000000000..e5fb84740899 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go @@ -0,0 +1,266 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "bytes" + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + bolt "github.com/coreos/bbolt" +) + +type BatchTx interface { + ReadTx + UnsafeCreateBucket(name []byte) + UnsafePut(bucketName []byte, key []byte, value []byte) + UnsafeSeqPut(bucketName []byte, key []byte, value []byte) + UnsafeDelete(bucketName []byte, key []byte) + // Commit commits a previous tx and begins a new writable one. + Commit() + // CommitAndStop commits the previous tx and does not create a new one. + CommitAndStop() +} + +type batchTx struct { + sync.Mutex + tx *bolt.Tx + backend *backend + + pending int +} + +func (t *batchTx) UnsafeCreateBucket(name []byte) { + _, err := t.tx.CreateBucket(name) + if err != nil && err != bolt.ErrBucketExists { + plog.Fatalf("cannot create bucket %s (%v)", name, err) + } + t.pending++ +} + +// UnsafePut must be called holding the lock on the tx. +func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) { + t.unsafePut(bucketName, key, value, false) +} + +// UnsafeSeqPut must be called holding the lock on the tx. +func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) { + t.unsafePut(bucketName, key, value, true) +} + +func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) { + bucket := t.tx.Bucket(bucketName) + if bucket == nil { + plog.Fatalf("bucket %s does not exist", bucketName) + } + if seq { + // it is useful to increase fill percent when the workloads are mostly append-only. + // this can delay the page split and reduce space usage. + bucket.FillPercent = 0.9 + } + if err := bucket.Put(key, value); err != nil { + plog.Fatalf("cannot put key into bucket (%v)", err) + } + t.pending++ +} + +// UnsafeRange must be called holding the lock on the tx. 
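+// For example (illustrative sketch; bucket and key names are placeholders),
+// a caller outside the package scans a bucket through BatchTx, much as the
+// lessor does in initAndRecover:
+//
+//	tx := b.BatchTx()
+//	tx.Lock()
+//	keys, vals := tx.UnsafeRange(bucketName, minKey, maxKey, 0)
+//	tx.Unlock()
+//
+// where a limit of 0 (or any non-positive value) means "no limit".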
+func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + k, v, err := unsafeRange(t.tx, bucketName, key, endKey, limit) + if err != nil { + plog.Fatal(err) + } + return k, v +} + +func unsafeRange(tx *bolt.Tx, bucketName, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte, err error) { + bucket := tx.Bucket(bucketName) + if bucket == nil { + return nil, nil, fmt.Errorf("bucket %s does not exist", bucketName) + } + if len(endKey) == 0 { + if v := bucket.Get(key); v != nil { + return append(keys, key), append(vs, v), nil + } + return nil, nil, nil + } + if limit <= 0 { + limit = math.MaxInt64 + } + c := bucket.Cursor() + for ck, cv := c.Seek(key); ck != nil && bytes.Compare(ck, endKey) < 0; ck, cv = c.Next() { + vs = append(vs, cv) + keys = append(keys, ck) + if limit == int64(len(keys)) { + break + } + } + return keys, vs, nil +} + +// UnsafeDelete must be called holding the lock on the tx. +func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) { + bucket := t.tx.Bucket(bucketName) + if bucket == nil { + plog.Fatalf("bucket %s does not exist", bucketName) + } + err := bucket.Delete(key) + if err != nil { + plog.Fatalf("cannot delete key from bucket (%v)", err) + } + t.pending++ +} + +// UnsafeForEach must be called holding the lock on the tx. +func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { + return unsafeForEach(t.tx, bucketName, visitor) +} + +func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error { + if b := tx.Bucket(bucket); b != nil { + return b.ForEach(visitor) + } + return nil +} + +// Commit commits a previous tx and begins a new writable one. +func (t *batchTx) Commit() { + t.Lock() + defer t.Unlock() + t.commit(false) +} + +// CommitAndStop commits the previous tx and does not create a new one. +func (t *batchTx) CommitAndStop() { + t.Lock() + defer t.Unlock() + t.commit(true) +} + +func (t *batchTx) Unlock() { + if t.pending >= t.backend.batchLimit { + t.commit(false) + } + t.Mutex.Unlock() +} + +func (t *batchTx) commit(stop bool) { + // commit the last tx + if t.tx != nil { + if t.pending == 0 && !stop { + t.backend.mu.RLock() + defer t.backend.mu.RUnlock() + + // t.tx.DB()==nil if 'CommitAndStop' calls 'batchTx.commit(true)', + // which initializes *bolt.Tx.db and *bolt.Tx.meta as nil; panics t.tx.Size(). + // Server must make sure 'batchTx.commit(false)' does not follow + // 'batchTx.commit(true)' (e.g. stopping backend, and inflight Hash call). 
+ atomic.StoreInt64(&t.backend.size, t.tx.Size()) + return + } + + start := time.Now() + // gofail: var beforeCommit struct{} + err := t.tx.Commit() + // gofail: var afterCommit struct{} + commitDurations.Observe(time.Since(start).Seconds()) + atomic.AddInt64(&t.backend.commits, 1) + + t.pending = 0 + if err != nil { + plog.Fatalf("cannot commit tx (%s)", err) + } + } + if !stop { + t.tx = t.backend.begin(true) + } +} + +type batchTxBuffered struct { + batchTx + buf txWriteBuffer +} + +func newBatchTxBuffered(backend *backend) *batchTxBuffered { + tx := &batchTxBuffered{ + batchTx: batchTx{backend: backend}, + buf: txWriteBuffer{ + txBuffer: txBuffer{make(map[string]*bucketBuffer)}, + seq: true, + }, + } + tx.Commit() + return tx +} + +func (t *batchTxBuffered) Unlock() { + if t.pending != 0 { + t.backend.readTx.mu.Lock() + t.buf.writeback(&t.backend.readTx.buf) + t.backend.readTx.mu.Unlock() + if t.pending >= t.backend.batchLimit { + t.commit(false) + } + } + t.batchTx.Unlock() +} + +func (t *batchTxBuffered) Commit() { + t.Lock() + defer t.Unlock() + t.commit(false) +} + +func (t *batchTxBuffered) CommitAndStop() { + t.Lock() + defer t.Unlock() + t.commit(true) +} + +func (t *batchTxBuffered) commit(stop bool) { + // all read txs must be closed to acquire boltdb commit rwlock + t.backend.readTx.mu.Lock() + defer t.backend.readTx.mu.Unlock() + t.unsafeCommit(stop) +} + +func (t *batchTxBuffered) unsafeCommit(stop bool) { + if t.backend.readTx.tx != nil { + if err := t.backend.readTx.tx.Rollback(); err != nil { + plog.Fatalf("cannot rollback tx (%s)", err) + } + t.backend.readTx.buf.reset() + t.backend.readTx.tx = nil + } + + t.batchTx.commit(stop) + + if !stop { + t.backend.readTx.tx = t.backend.begin(false) + } +} + +func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) { + t.batchTx.UnsafePut(bucketName, key, value) + t.buf.put(bucketName, key, value) +} + +func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) { + t.batchTx.UnsafeSeqPut(bucketName, key, value) + t.buf.putSeq(bucketName, key, value) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go new file mode 100644 index 000000000000..edfed0025c6c --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go @@ -0,0 +1,23 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !linux,!windows + +package backend + +import bolt "github.com/coreos/bbolt" + +var boltOpenOptions *bolt.Options = nil + +func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go new file mode 100644 index 000000000000..a8f6abeba63e --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go @@ -0,0 +1,33 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "syscall" + + bolt "github.com/coreos/bbolt" +) + +// syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead +// which can speed up entire-database read with boltdb. We want to +// enable MAP_POPULATE for faster key-value store recovery in storage +// package. If your kernel version is lower than 2.6.23 +// (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might +// silently ignore this flag. Please update your kernel to prevent this. +var boltOpenOptions = &bolt.Options{ + MmapFlags: syscall.MAP_POPULATE, +} + +func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go new file mode 100644 index 000000000000..71d02700bcdc --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package backend + +import bolt "github.com/coreos/bbolt" + +var boltOpenOptions *bolt.Options = nil + +// setting mmap size != 0 on windows will allocate the entire +// mmap size for the file, instead of growing it. So, force 0. + +func (bcfg *BackendConfig) mmapSize() int { return 0 } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/doc.go b/vendor/github.com/coreos/etcd/mvcc/backend/doc.go new file mode 100644 index 000000000000..9cc42fa793cb --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package backend defines a standard interface for etcd's backend MVCC storage. +package backend diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go new file mode 100644 index 000000000000..30a388014766 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go @@ -0,0 +1,41 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import "github.com/prometheus/client_golang/prometheus" + +var ( + commitDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "disk", + Name: "backend_commit_duration_seconds", + Help: "The latency distributions of commit called by backend.", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), + }) + + snapshotDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "disk", + Name: "backend_snapshot_duration_seconds", + Help: "The latency distribution of backend snapshots.", + // 10 ms -> 655 seconds + Buckets: prometheus.ExponentialBuckets(.01, 2, 17), + }) +) + +func init() { + prometheus.MustRegister(commitDurations) + prometheus.MustRegister(snapshotDurations) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go new file mode 100644 index 000000000000..9fc6b7906206 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go @@ -0,0 +1,92 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "bytes" + "math" + "sync" + + bolt "github.com/coreos/bbolt" +) + +// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys; +// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket +// is known to never overwrite any key so range is safe. 
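+// (Editorial note: a ranged read in UnsafeRange below merges results from
+// the txReadBuffer with results from boltdb; if a bucket allowed key
+// overwrites, the same key could presumably surface twice with different
+// values. Single-key lookups are therefore forced to limit=1, and the MVCC
+// "key" bucket never overwrites because its keys are revisions.)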
+var safeRangeBucket = []byte("key") + +type ReadTx interface { + Lock() + Unlock() + + UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) + UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error +} + +type readTx struct { + // mu protects accesses to the txReadBuffer + mu sync.RWMutex + buf txReadBuffer + + // txmu protects accesses to the Tx on Range requests + txmu sync.Mutex + tx *bolt.Tx +} + +func (rt *readTx) Lock() { rt.mu.RLock() } +func (rt *readTx) Unlock() { rt.mu.RUnlock() } + +func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + if endKey == nil { + // forbid duplicates for single keys + limit = 1 + } + if limit <= 0 { + limit = math.MaxInt64 + } + if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) { + panic("do not use unsafeRange on non-keys bucket") + } + keys, vals := rt.buf.Range(bucketName, key, endKey, limit) + if int64(len(keys)) == limit { + return keys, vals + } + rt.txmu.Lock() + // ignore error since bucket may have been created in this batch + k2, v2, _ := unsafeRange(rt.tx, bucketName, key, endKey, limit-int64(len(keys))) + rt.txmu.Unlock() + return append(k2, keys...), append(v2, vals...) +} + +func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { + dups := make(map[string]struct{}) + f1 := func(k, v []byte) error { + dups[string(k)] = struct{}{} + return visitor(k, v) + } + f2 := func(k, v []byte) error { + if _, ok := dups[string(k)]; ok { + return nil + } + return visitor(k, v) + } + if err := rt.buf.ForEach(bucketName, f1); err != nil { + return err + } + rt.txmu.Lock() + err := unsafeForEach(rt.tx, bucketName, f2) + rt.txmu.Unlock() + return err +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go new file mode 100644 index 000000000000..56e885dbfbc3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go @@ -0,0 +1,181 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "bytes" + "sort" +) + +// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer. +type txBuffer struct { + buckets map[string]*bucketBuffer +} + +func (txb *txBuffer) reset() { + for k, v := range txb.buckets { + if v.used == 0 { + // demote + delete(txb.buckets, k) + } + v.used = 0 + } +} + +// txWriteBuffer buffers writes of pending updates that have not yet committed. 
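+//
+// On batchTxBuffered.Unlock, writeback copies these pending updates into
+// the backend's txReadBuffer, which is how a read in the same batch can
+// observe a write before boltdb has committed it.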
+type txWriteBuffer struct {
+	txBuffer
+	seq bool
+}
+
+func (txw *txWriteBuffer) put(bucket, k, v []byte) {
+	txw.seq = false
+	txw.putSeq(bucket, k, v)
+}
+
+func (txw *txWriteBuffer) putSeq(bucket, k, v []byte) {
+	b, ok := txw.buckets[string(bucket)]
+	if !ok {
+		b = newBucketBuffer()
+		txw.buckets[string(bucket)] = b
+	}
+	b.add(k, v)
+}
+
+func (txw *txWriteBuffer) writeback(txr *txReadBuffer) {
+	for k, wb := range txw.buckets {
+		rb, ok := txr.buckets[k]
+		if !ok {
+			delete(txw.buckets, k)
+			txr.buckets[k] = wb
+			continue
+		}
+		if !txw.seq && wb.used > 1 {
+			// assume no duplicate keys
+			sort.Sort(wb)
+		}
+		rb.merge(wb)
+	}
+	txw.reset()
+}
+
+// txReadBuffer accesses buffered updates.
+type txReadBuffer struct{ txBuffer }
+
+func (txr *txReadBuffer) Range(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+	if b := txr.buckets[string(bucketName)]; b != nil {
+		return b.Range(key, endKey, limit)
+	}
+	return nil, nil
+}
+
+func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+	if b := txr.buckets[string(bucketName)]; b != nil {
+		return b.ForEach(visitor)
+	}
+	return nil
+}
+
+type kv struct {
+	key []byte
+	val []byte
+}
+
+// bucketBuffer buffers key-value pairs that are pending commit.
+type bucketBuffer struct {
+	buf []kv
+	// used tracks the number of elements in use so buf can be reused without reallocation.
+	used int
+}
+
+func newBucketBuffer() *bucketBuffer {
+	return &bucketBuffer{buf: make([]kv, 512), used: 0}
+}
+
+func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
+	f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 }
+	idx := sort.Search(bb.used, f)
+	if idx < 0 {
+		return nil, nil
+	}
+	if len(endKey) == 0 {
+		if bytes.Equal(key, bb.buf[idx].key) {
+			keys = append(keys, bb.buf[idx].key)
+			vals = append(vals, bb.buf[idx].val)
+		}
+		return keys, vals
+	}
+	if bytes.Compare(endKey, bb.buf[idx].key) <= 0 {
+		return nil, nil
+	}
+	for i := idx; i < bb.used && int64(len(keys)) < limit; i++ {
+		if bytes.Compare(endKey, bb.buf[i].key) <= 0 {
+			break
+		}
+		keys = append(keys, bb.buf[i].key)
+		vals = append(vals, bb.buf[i].val)
+	}
+	return keys, vals
+}
+
+func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error {
+	for i := 0; i < bb.used; i++ {
+		if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (bb *bucketBuffer) add(k, v []byte) {
+	bb.buf[bb.used].key, bb.buf[bb.used].val = k, v
+	bb.used++
+	if bb.used == len(bb.buf) {
+		buf := make([]kv, (3*len(bb.buf))/2)
+		copy(buf, bb.buf)
+		bb.buf = buf
+	}
+}
+
+// merge merges data from bbsrc into bb.
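+// For example (illustrative): if bb holds a->v1, c->v1 and bbsrc holds
+// b->v2, c->v2, the appends yield [a->v1 c->v1 b->v2 c->v2]; since that is
+// no longer sorted, sort.Stable reorders it to [a->v1 b->v2 c->v1 c->v2],
+// and the dedup pass keeps only the last (newest) entry for each key,
+// leaving [a->v1 b->v2 c->v2].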
+func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) { + for i := 0; i < bbsrc.used; i++ { + bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val) + } + if bb.used == bbsrc.used { + return + } + if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 { + return + } + + sort.Stable(bb) + + // remove duplicates, using only newest update + widx := 0 + for ridx := 1; ridx < bb.used; ridx++ { + if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) { + widx++ + } + bb.buf[widx] = bb.buf[ridx] + } + bb.used = widx + 1 +} + +func (bb *bucketBuffer) Len() int { return bb.used } +func (bb *bucketBuffer) Less(i, j int) bool { + return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0 +} +func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] } diff --git a/vendor/github.com/coreos/etcd/mvcc/doc.go b/vendor/github.com/coreos/etcd/mvcc/doc.go new file mode 100644 index 000000000000..ad5be03086fb --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package mvcc defines etcd's stable MVCC storage. +package mvcc diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go new file mode 100644 index 000000000000..991289cdd5c4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/index.go @@ -0,0 +1,219 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvcc + +import ( + "sort" + "sync" + + "github.com/google/btree" +) + +type index interface { + Get(key []byte, atRev int64) (rev, created revision, ver int64, err error) + Range(key, end []byte, atRev int64) ([][]byte, []revision) + Put(key []byte, rev revision) + Tombstone(key []byte, rev revision) error + RangeSince(key, end []byte, rev int64) []revision + Compact(rev int64) map[revision]struct{} + Equal(b index) bool + + Insert(ki *keyIndex) + KeyIndex(ki *keyIndex) *keyIndex +} + +type treeIndex struct { + sync.RWMutex + tree *btree.BTree +} + +func newTreeIndex() index { + return &treeIndex{ + tree: btree.New(32), + } +} + +func (ti *treeIndex) Put(key []byte, rev revision) { + keyi := &keyIndex{key: key} + + ti.Lock() + defer ti.Unlock() + item := ti.tree.Get(keyi) + if item == nil { + keyi.put(rev.main, rev.sub) + ti.tree.ReplaceOrInsert(keyi) + return + } + okeyi := item.(*keyIndex) + okeyi.put(rev.main, rev.sub) +} + +func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { + keyi := &keyIndex{key: key} + ti.RLock() + defer ti.RUnlock() + if keyi = ti.keyIndex(keyi); keyi == nil { + return revision{}, revision{}, 0, ErrRevisionNotFound + } + return keyi.get(atRev) +} + +func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { + ti.RLock() + defer ti.RUnlock() + return ti.keyIndex(keyi) +} + +func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { + if item := ti.tree.Get(keyi); item != nil { + return item.(*keyIndex) + } + return nil +} + +func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) { + if end == nil { + rev, _, _, err := ti.Get(key, atRev) + if err != nil { + return nil, nil + } + return [][]byte{key}, []revision{rev} + } + + keyi := &keyIndex{key: key} + endi := &keyIndex{key: end} + + ti.RLock() + defer ti.RUnlock() + + ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { + if len(endi.key) > 0 && !item.Less(endi) { + return false + } + curKeyi := item.(*keyIndex) + rev, _, _, err := curKeyi.get(atRev) + if err != nil { + return true + } + revs = append(revs, rev) + keys = append(keys, curKeyi.key) + return true + }) + + return keys, revs +} + +func (ti *treeIndex) Tombstone(key []byte, rev revision) error { + keyi := &keyIndex{key: key} + + ti.Lock() + defer ti.Unlock() + item := ti.tree.Get(keyi) + if item == nil { + return ErrRevisionNotFound + } + + ki := item.(*keyIndex) + return ki.tombstone(rev.main, rev.sub) +} + +// RangeSince returns all revisions from key(including) to end(excluding) +// at or after the given rev. The returned slice is sorted in the order +// of revision. +func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { + ti.RLock() + defer ti.RUnlock() + + keyi := &keyIndex{key: key} + if end == nil { + item := ti.tree.Get(keyi) + if item == nil { + return nil + } + keyi = item.(*keyIndex) + return keyi.since(rev) + } + + endi := &keyIndex{key: end} + var revs []revision + ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { + if len(endi.key) > 0 && !item.Less(endi) { + return false + } + curKeyi := item.(*keyIndex) + revs = append(revs, curKeyi.since(rev)...) + return true + }) + sort.Sort(revisions(revs)) + + return revs +} + +func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { + available := make(map[revision]struct{}) + var emptyki []*keyIndex + plog.Printf("store.index: compact %d", rev) + // TODO: do not hold the lock for long time? + // This is probably OK. 
Compacting 10M keys takes on the order of 10ms.
+	ti.Lock()
+	defer ti.Unlock()
+	ti.tree.Ascend(compactIndex(rev, available, &emptyki))
+	for _, ki := range emptyki {
+		item := ti.tree.Delete(ki)
+		if item == nil {
+			plog.Panic("store.index: unexpected delete failure during compaction")
+		}
+	}
+	return available
+}
+
+func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyIndex) func(i btree.Item) bool {
+	return func(i btree.Item) bool {
+		keyi := i.(*keyIndex)
+		keyi.compact(rev, available)
+		if keyi.isEmpty() {
+			*emptyki = append(*emptyki, keyi)
+		}
+		return true
+	}
+}
+
+func (a *treeIndex) Equal(bi index) bool {
+	b := bi.(*treeIndex)
+
+	if a.tree.Len() != b.tree.Len() {
+		return false
+	}
+
+	equal := true
+
+	a.tree.Ascend(func(item btree.Item) bool {
+		aki := item.(*keyIndex)
+		bki := b.tree.Get(item).(*keyIndex)
+		if !aki.equal(bki) {
+			equal = false
+			return false
+		}
+		return true
+	})
+
+	return equal
+}
+
+func (ti *treeIndex) Insert(ki *keyIndex) {
+	ti.Lock()
+	defer ti.Unlock()
+	ti.tree.ReplaceOrInsert(ki)
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go
new file mode 100644
index 000000000000..9104f9b2d36a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/key_index.go
@@ -0,0 +1,332 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	"github.com/google/btree"
+)
+
+var (
+	ErrRevisionNotFound = errors.New("mvcc: revision not found")
+)
+
+// keyIndex stores the revisions of a key in the backend.
+// Each keyIndex has at least one key generation.
+// Each generation might have several key versions.
+// A tombstone on a key appends a tombstone version at the end
+// of the current generation and creates a new empty generation.
+// Each version of a key has an index pointing to the backend.
+//
+// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo"
+// generates a keyIndex:
+// key: "foo"
+// rev: 5
+// generations:
+//    {empty}
+//    {4.0, 5.0(t)}
+//    {1.0, 2.0, 3.0(t)}
+//
+// Compacting a keyIndex removes the versions with revisions smaller than or
+// equal to rev, except the largest one. If a generation becomes empty
+// during compaction, it will be removed. If all the generations are
+// removed, the keyIndex should be removed.
+//
+// For example:
+// compact(2) on the previous example
+// generations:
+//    {empty}
+//    {4.0, 5.0(t)}
+//    {2.0, 3.0(t)}
+//
+// compact(4)
+// generations:
+//    {empty}
+//    {4.0, 5.0(t)}
+//
+// compact(5):
+// generations:
+//    {empty} -> key SHOULD be removed.
+//
+// compact(6):
+// generations:
+//    {empty} -> key SHOULD be removed.
+type keyIndex struct {
+	key         []byte
+	modified    revision // the main rev of the last modification
+	generations []generation
+}
+
+// put puts a revision to the keyIndex.
+func (ki *keyIndex) put(main int64, sub int64) { + rev := revision{main: main, sub: sub} + + if !rev.GreaterThan(ki.modified) { + plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified) + } + if len(ki.generations) == 0 { + ki.generations = append(ki.generations, generation{}) + } + g := &ki.generations[len(ki.generations)-1] + if len(g.revs) == 0 { // create a new key + keysGauge.Inc() + g.created = rev + } + g.revs = append(g.revs, rev) + g.ver++ + ki.modified = rev +} + +func (ki *keyIndex) restore(created, modified revision, ver int64) { + if len(ki.generations) != 0 { + plog.Panicf("store.keyindex: cannot restore non-empty keyIndex") + } + + ki.modified = modified + g := generation{created: created, ver: ver, revs: []revision{modified}} + ki.generations = append(ki.generations, g) + keysGauge.Inc() +} + +// tombstone puts a revision, pointing to a tombstone, to the keyIndex. +// It also creates a new empty generation in the keyIndex. +// It returns ErrRevisionNotFound when tombstone on an empty generation. +func (ki *keyIndex) tombstone(main int64, sub int64) error { + if ki.isEmpty() { + plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key)) + } + if ki.generations[len(ki.generations)-1].isEmpty() { + return ErrRevisionNotFound + } + ki.put(main, sub) + ki.generations = append(ki.generations, generation{}) + keysGauge.Dec() + return nil +} + +// get gets the modified, created revision and version of the key that satisfies the given atRev. +// Rev must be higher than or equal to the given atRev. +func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err error) { + if ki.isEmpty() { + plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + } + g := ki.findGeneration(atRev) + if g.isEmpty() { + return revision{}, revision{}, 0, ErrRevisionNotFound + } + + n := g.walk(func(rev revision) bool { return rev.main > atRev }) + if n != -1 { + return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil + } + + return revision{}, revision{}, 0, ErrRevisionNotFound +} + +// since returns revisions since the given rev. Only the revision with the +// largest sub revision will be returned if multiple revisions have the same +// main revision. +func (ki *keyIndex) since(rev int64) []revision { + if ki.isEmpty() { + plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + } + since := revision{rev, 0} + var gi int + // find the generations to start checking + for gi = len(ki.generations) - 1; gi > 0; gi-- { + g := ki.generations[gi] + if g.isEmpty() { + continue + } + if since.GreaterThan(g.created) { + break + } + } + + var revs []revision + var last int64 + for ; gi < len(ki.generations); gi++ { + for _, r := range ki.generations[gi].revs { + if since.GreaterThan(r) { + continue + } + if r.main == last { + // replace the revision with a new one that has higher sub value, + // because the original one should not be seen by external + revs[len(revs)-1] = r + continue + } + revs = append(revs, r) + last = r.main + } + } + return revs +} + +// compact compacts a keyIndex by removing the versions with smaller or equal +// revision than the given atRev except the largest one (If the largest one is +// a tombstone, it will not be kept). +// If a generation becomes empty during compaction, it will be removed. 
+func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) {
+	if ki.isEmpty() {
+		plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key))
+	}
+
+	// walk until reaching the first revision whose main revision is smaller
+	// than or equal to atRev, and add that revision to the available map
+	f := func(rev revision) bool {
+		if rev.main <= atRev {
+			available[rev] = struct{}{}
+			return false
+		}
+		return true
+	}
+
+	i, g := 0, &ki.generations[0]
+	// find the first generation that includes atRev or was created after atRev
+	for i < len(ki.generations)-1 {
+		if tomb := g.revs[len(g.revs)-1].main; tomb > atRev {
+			break
+		}
+		i++
+		g = &ki.generations[i]
+	}
+
+	if !g.isEmpty() {
+		n := g.walk(f)
+		// remove the previous contents.
+		if n != -1 {
+			g.revs = g.revs[n:]
+		}
+		// remove any tombstone
+		if len(g.revs) == 1 && i != len(ki.generations)-1 {
+			delete(available, g.revs[0])
+			i++
+		}
+	}
+	// remove the previous generations.
+	ki.generations = ki.generations[i:]
+}
+
+func (ki *keyIndex) isEmpty() bool {
+	return len(ki.generations) == 1 && ki.generations[0].isEmpty()
+}
+
+// findGeneration finds the generation of the keyIndex that the given rev
+// belongs to. If the given rev falls in the gap between two generations,
+// which means that the key does not exist at the given rev, it returns nil.
+func (ki *keyIndex) findGeneration(rev int64) *generation {
+	lastg := len(ki.generations) - 1
+	cg := lastg
+
+	for cg >= 0 {
+		if len(ki.generations[cg].revs) == 0 {
+			cg--
+			continue
+		}
+		g := ki.generations[cg]
+		if cg != lastg {
+			if tomb := g.revs[len(g.revs)-1].main; tomb <= rev {
+				return nil
+			}
+		}
+		if g.revs[0].main <= rev {
+			return &ki.generations[cg]
+		}
+		cg--
+	}
+	return nil
+}
+
+func (a *keyIndex) Less(b btree.Item) bool {
+	return bytes.Compare(a.key, b.(*keyIndex).key) == -1
+}
+
+func (a *keyIndex) equal(b *keyIndex) bool {
+	if !bytes.Equal(a.key, b.key) {
+		return false
+	}
+	if a.modified != b.modified {
+		return false
+	}
+	if len(a.generations) != len(b.generations) {
+		return false
+	}
+	for i := range a.generations {
+		ag, bg := a.generations[i], b.generations[i]
+		if !ag.equal(bg) {
+			return false
+		}
+	}
+	return true
+}
+
+func (ki *keyIndex) String() string {
+	var s string
+	for _, g := range ki.generations {
+		s += g.String()
+	}
+	return s
+}
+
+// generation contains multiple revisions of a key.
+type generation struct {
+	ver     int64
+	created revision // when the generation is created (put in first revision).
+	revs    []revision
+}
+
+func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 }
+
+// walk walks through the revisions in the generation in descending order.
+// It passes each revision to the given function and stops either when it
+// has visited all revisions or when the function returns false.
+// walk returns the position where it stopped, or -1 if it finished
+// walking all revisions.
+func (g *generation) walk(f func(rev revision) bool) int { + l := len(g.revs) + for i := range g.revs { + ok := f(g.revs[l-i-1]) + if !ok { + return l - i - 1 + } + } + return -1 +} + +func (g *generation) String() string { + return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs) +} + +func (a generation) equal(b generation) bool { + if a.ver != b.ver { + return false + } + if len(a.revs) != len(b.revs) { + return false + } + + for i := range a.revs { + ar, br := a.revs[i], b.revs[i] + if ar != br { + return false + } + } + return true +} diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go new file mode 100644 index 000000000000..6636347aa431 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kv.go @@ -0,0 +1,147 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" +) + +type RangeOptions struct { + Limit int64 + Rev int64 + Count bool +} + +type RangeResult struct { + KVs []mvccpb.KeyValue + Rev int64 + Count int +} + +type ReadView interface { + // FirstRev returns the first KV revision at the time of opening the txn. + // After a compaction, the first revision increases to the compaction + // revision. + FirstRev() int64 + + // Rev returns the revision of the KV at the time of opening the txn. + Rev() int64 + + // Range gets the keys in the range at rangeRev. + // The returned rev is the current revision of the KV when the operation is executed. + // If rangeRev <=0, range gets the keys at currentRev. + // If `end` is nil, the request returns the key. + // If `end` is not nil and not empty, it gets the keys in range [key, range_end). + // If `end` is not nil and empty, it gets the keys greater than or equal to key. + // Limit limits the number of keys returned. + // If the required rev is compacted, ErrCompacted will be returned. + Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) +} + +// TxnRead represents a read-only transaction with operations that will not +// block other read transactions. +type TxnRead interface { + ReadView + // End marks the transaction is complete and ready to commit. + End() +} + +type WriteView interface { + // DeleteRange deletes the given range from the store. + // A deleteRange increases the rev of the store if any key in the range exists. + // The number of key deleted will be returned. + // The returned rev is the current revision of the KV when the operation is executed. + // It also generates one event for each key delete in the event history. + // if the `end` is nil, deleteRange deletes the key. + // if the `end` is not nil, deleteRange deletes the keys in range [key, range_end). + DeleteRange(key, end []byte) (n, rev int64) + + // Put puts the given key, value into the store. 
Put also takes an additional argument, lease, to
+	// attach a lease to the key-value pair as metadata. The KV implementation does
+	// not validate the lease ID.
+	// A put also increases the rev of the store, and generates one event in the event history.
+	// The returned rev is the current revision of the KV when the operation is executed.
+	Put(key, value []byte, lease lease.LeaseID) (rev int64)
+}
+
+// TxnWrite represents a transaction that can modify the store.
+type TxnWrite interface {
+	TxnRead
+	WriteView
+	// Changes gets the changes made since opening the write txn.
+	Changes() []mvccpb.KeyValue
+}
+
+// txnReadWrite coerces a read txn to a write, panicking on any write operation.
+type txnReadWrite struct{ TxnRead }
+
+func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") }
+func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+	panic("unexpected Put")
+}
+func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil }
+
+func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} }
+
+type KV interface {
+	ReadView
+	WriteView
+
+	// Read creates a read transaction.
+	Read() TxnRead
+
+	// Write creates a write transaction.
+	Write() TxnWrite
+
+	// Hash retrieves the hash of the KV state and revision.
+	// This method is designed for consistency checking purposes.
+	Hash() (hash uint32, revision int64, err error)
+
+	// Compact frees all superseded keys with revisions less than rev.
+	Compact(rev int64) (<-chan struct{}, error)
+
+	// Commit commits outstanding txns into the underlying backend.
+	Commit()
+
+	// Restore restores the KV store from a backend.
+	Restore(b backend.Backend) error
+	Close() error
+}
+
+// WatchableKV is a KV that can be watched.
+type WatchableKV interface {
+	KV
+	Watchable
+}
+
+// Watchable is the interface that wraps the NewWatchStream function.
+type Watchable interface {
+	// NewWatchStream returns a WatchStream that can be used to
+	// watch events that happened or are happening on the KV.
+	NewWatchStream() WatchStream
+}
+
+// ConsistentWatchableKV is a WatchableKV that understands the consistency
+// algorithm and consistent index.
+// If the consistent index of the executing entry is not larger than the
+// consistent index of the ConsistentWatchableKV, all operations in
+// this entry are skipped and return an empty response.
+type ConsistentWatchableKV interface {
+	WatchableKV
+	// ConsistentIndex returns the current consistent index of the KV.
+	ConsistentIndex() uint64
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go
new file mode 100644
index 000000000000..f40ba8edc22b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/kv_view.go
@@ -0,0 +1,53 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
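+
+// readView and writeView adapt a KV's Read and Write transaction
+// constructors to the ReadView and WriteView interfaces: each method
+// opens a short-lived txn, delegates the call, and ends the txn before
+// returning.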
+
+package mvcc
+
+import (
+	"github.com/coreos/etcd/lease"
+)
+
+type readView struct{ kv KV }
+
+func (rv *readView) FirstRev() int64 {
+	tr := rv.kv.Read()
+	defer tr.End()
+	return tr.FirstRev()
+}
+
+func (rv *readView) Rev() int64 {
+	tr := rv.kv.Read()
+	defer tr.End()
+	return tr.Rev()
+}
+
+func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+	tr := rv.kv.Read()
+	defer tr.End()
+	return tr.Range(key, end, ro)
+}
+
+type writeView struct{ kv KV }
+
+func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) {
+	tw := wv.kv.Write()
+	defer tw.End()
+	return tw.DeleteRange(key, end)
+}
+
+func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+	tw := wv.kv.Write()
+	defer tw.End()
+	return tw.Put(key, value, lease)
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go
new file mode 100644
index 000000000000..28a508ccb959
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/kvstore.go
@@ -0,0 +1,459 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"encoding/binary"
+	"errors"
+	"math"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/lease"
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+	"github.com/coreos/etcd/pkg/schedule"
+	"github.com/coreos/pkg/capnslog"
+	"golang.org/x/net/context"
+)
+
+var (
+	keyBucketName  = []byte("key")
+	metaBucketName = []byte("meta")
+
+	consistentIndexKeyName  = []byte("consistent_index")
+	scheduledCompactKeyName = []byte("scheduledCompactRev")
+	finishedCompactKeyName  = []byte("finishedCompactRev")
+
+	ErrCompacted = errors.New("mvcc: required revision has been compacted")
+	ErrFutureRev = errors.New("mvcc: required revision is a future revision")
+	ErrCanceled  = errors.New("mvcc: watcher is canceled")
+	ErrClosed    = errors.New("mvcc: closed")
+
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc")
+)
+
+const (
+	// markedRevBytesLen is the byte length of a marked revision.
+	// The first `revBytesLen` bytes represent a normal revision. The last
+	// byte is the mark.
+	markedRevBytesLen      = revBytesLen + 1
+	markBytePosition       = markedRevBytesLen - 1
+	markTombstone     byte = 't'
+)
+
+var restoreChunkKeys = 10000 // non-const for testing
+
+// ConsistentIndexGetter is an interface that wraps the ConsistentIndex method.
+// A consistent index is the offset of an entry in a consistent replicated log.
+type ConsistentIndexGetter interface {
+	// ConsistentIndex returns the consistent index of the current executing entry.
+	ConsistentIndex() uint64
+}
+
+type store struct {
+	ReadView
+	WriteView
+
+	// mu is read-locked for txns and write-locked for non-txn store changes.
+	mu sync.RWMutex
+
+	ig ConsistentIndexGetter
+
+	b       backend.Backend
+	kvindex index
+
+	le lease.Lessor
+
+	// revMu protects currentRev and compactMainRev.
+	// It is locked at the end of a write txn and released after the write
+	// txn's backend lock is released.
+	// It is locked before locking a read txn and released after the read
+	// txn's lock is acquired.
+	revMu sync.RWMutex
+	// currentRev is the revision of the last completed transaction.
+	currentRev int64
+	// compactMainRev is the main revision of the last compaction.
+	compactMainRev int64
+
+	// bytesBuf8 is a byte slice of length 8
+	// to avoid a repetitive allocation in saveIndex.
+	bytesBuf8 []byte
+
+	fifoSched schedule.Scheduler
+
+	stopc chan struct{}
+}
+
+// NewStore returns a new store. It is useful for creating a store inside
+// the mvcc package. Externally it should only be used for testing.
+func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store {
+	s := &store{
+		b:       b,
+		ig:      ig,
+		kvindex: newTreeIndex(),
+
+		le: le,
+
+		currentRev:     1,
+		compactMainRev: -1,
+
+		bytesBuf8: make([]byte, 8),
+		fifoSched: schedule.NewFIFOScheduler(),
+
+		stopc: make(chan struct{}),
+	}
+	s.ReadView = &readView{s}
+	s.WriteView = &writeView{s}
+	if s.le != nil {
+		s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
+	}
+
+	tx := s.b.BatchTx()
+	tx.Lock()
+	tx.UnsafeCreateBucket(keyBucketName)
+	tx.UnsafeCreateBucket(metaBucketName)
+	tx.Unlock()
+	s.b.ForceCommit()
+
+	if err := s.restore(); err != nil {
+		// TODO: return the error instead of panic here?
+		panic("failed to recover store from backend")
+	}
+
+	return s
+}
+
+func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
+	if ctx == nil || ctx.Err() != nil {
+		s.mu.Lock()
+		select {
+		case <-s.stopc:
+		default:
+			f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
+			s.fifoSched.Schedule(f)
+		}
+		s.mu.Unlock()
+		return
+	}
+	close(ch)
+}
+
+func (s *store) Hash() (hash uint32, revision int64, err error) {
+	s.b.ForceCommit()
+	h, err := s.b.Hash(DefaultIgnores)
+	return h, s.currentRev, err
+}
+
+func (s *store) Compact(rev int64) (<-chan struct{}, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.revMu.Lock()
+	defer s.revMu.Unlock()
+
+	if rev <= s.compactMainRev {
+		ch := make(chan struct{})
+		f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
+		s.fifoSched.Schedule(f)
+		return ch, ErrCompacted
+	}
+	if rev > s.currentRev {
+		return nil, ErrFutureRev
+	}
+
+	start := time.Now()
+
+	s.compactMainRev = rev
+
+	rbytes := newRevBytes()
+	revToBytes(revision{main: rev}, rbytes)
+
+	tx := s.b.BatchTx()
+	tx.Lock()
+	tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes)
+	tx.Unlock()
+	// ensure that the desired compaction is persisted
+	s.b.ForceCommit()
+
+	keep := s.kvindex.Compact(rev)
+	ch := make(chan struct{})
+	var j = func(ctx context.Context) {
+		if ctx.Err() != nil {
+			s.compactBarrier(ctx, ch)
+			return
+		}
+		if !s.scheduleCompaction(rev, keep) {
+			s.compactBarrier(nil, ch)
+			return
+		}
+		close(ch)
+	}
+
+	s.fifoSched.Schedule(j)
+
+	indexCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond))
+	return ch, nil
+}
+
+// DefaultIgnores is a map of keys to ignore in hash checking.
+var DefaultIgnores map[backend.IgnoreKey]struct{}
+
+func init() {
+	DefaultIgnores = map[backend.IgnoreKey]struct{}{
+		// the consistent index might be changed due to a v2 internal sync,
+		// which is not controllable by the user.
+		{Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {},
+	}
+}
+
+func (s *store) Commit() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	tx := s.b.BatchTx()
+	tx.Lock()
+	s.saveIndex(tx)
+	tx.Unlock()
+	s.b.ForceCommit()
+}
+
+func (s *store) Restore(b backend.Backend) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	close(s.stopc)
+	s.fifoSched.Stop()
+
+	s.b = b
+	s.kvindex = newTreeIndex()
+	s.currentRev = 1
+	s.compactMainRev = -1
+	s.fifoSched = schedule.NewFIFOScheduler()
+	s.stopc = make(chan struct{})
+
+	return s.restore()
+}
+
+func (s *store) restore() error {
+	reportDbTotalSizeInBytesMu.Lock()
+	b := s.b
+	reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
+	reportDbTotalSizeInBytesMu.Unlock()
+
+	min, max := newRevBytes(), newRevBytes()
+	revToBytes(revision{main: 1}, min)
+	revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max)
+
+	keyToLease := make(map[string]lease.LeaseID)
+
+	// restore index
+	tx := s.b.BatchTx()
+	tx.Lock()
+
+	_, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0)
+	if len(finishedCompactBytes) != 0 {
+		s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main
+		plog.Printf("restore compact to %d", s.compactMainRev)
+	}
+	_, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0)
+	scheduledCompact := int64(0)
+	if len(scheduledCompactBytes) != 0 {
+		scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main
+	}
+
+	// index keys concurrently as they're loaded in from tx
+	keysGauge.Set(0)
+	rkvc, revc := restoreIntoIndex(s.kvindex)
+	for {
+		keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys))
+		if len(keys) == 0 {
+			break
+		}
+		// rkvc blocks if the total pending keys exceeds the restore
+		// chunk size to keep keys from consuming too much memory.
+		restoreChunk(rkvc, keys, vals, keyToLease)
+		if len(keys) < restoreChunkKeys {
+			// partial set implies final set
+			break
+		}
+		// the next set begins after where this one ended
+		newMin := bytesToRev(keys[len(keys)-1][:revBytesLen])
+		newMin.sub++
+		revToBytes(newMin, min)
+	}
+	close(rkvc)
+	s.currentRev = <-revc
+
+	// Keys in the range [compacted revision - N, compaction] might all be deleted
+	// due to compaction. In that case the correct current revision is the
+	// compaction revision, not the largest revision we have seen.
+	if s.currentRev < s.compactMainRev {
+		s.currentRev = s.compactMainRev
+	}
+	if scheduledCompact <= s.compactMainRev {
+		scheduledCompact = 0
+	}
+
+	for key, lid := range keyToLease {
+		if s.le == nil {
+			panic("no lessor to attach lease")
+		}
+		err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}})
+		if err != nil {
+			plog.Errorf("unexpected Attach error: %v", err)
+		}
+	}
+
+	tx.Unlock()
+
+	if scheduledCompact != 0 {
+		s.Compact(scheduledCompact)
+		plog.Printf("resume scheduled compaction at %d", scheduledCompact)
+	}
+
+	return nil
+}
+
+type revKeyValue struct {
+	key  []byte
+	kv   mvccpb.KeyValue
+	kstr string
+}
+
+func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) {
+	rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1)
+	go func() {
+		currentRev := int64(1)
+		defer func() { revc <- currentRev }()
+		// restore the tree index from streaming the unordered index.
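+		// kiCache memoizes keyIndexes seen during this restore so that
+		// repeated revisions of the same key can skip the tree-index lookup.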
+		kiCache := make(map[string]*keyIndex, restoreChunkKeys)
+		for rkv := range rkvc {
+			ki, ok := kiCache[rkv.kstr]
+			// purge kiCache if it has grown large and the key is still missing
+			if !ok && len(kiCache) >= restoreChunkKeys {
+				i := 10
+				for k := range kiCache {
+					delete(kiCache, k)
+					if i--; i == 0 {
+						break
+					}
+				}
+			}
+			// on a cache miss, fetch from the tree index if the key is there
+			if !ok {
+				ki = &keyIndex{key: rkv.kv.Key}
+				if idxKey := idx.KeyIndex(ki); idxKey != nil {
+					kiCache[rkv.kstr], ki = idxKey, idxKey
+					ok = true
+				}
+			}
+			rev := bytesToRev(rkv.key)
+			currentRev = rev.main
+			if ok {
+				if isTombstone(rkv.key) {
+					ki.tombstone(rev.main, rev.sub)
+					continue
+				}
+				ki.put(rev.main, rev.sub)
+			} else if !isTombstone(rkv.key) {
+				ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version)
+				idx.Insert(ki)
+				kiCache[rkv.kstr] = ki
+			}
+		}
+	}()
+	return rkvc, revc
+}
+
+func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) {
+	for i, key := range keys {
+		rkv := revKeyValue{key: key}
+		if err := rkv.kv.Unmarshal(vals[i]); err != nil {
+			plog.Fatalf("cannot unmarshal event: %v", err)
+		}
+		rkv.kstr = string(rkv.kv.Key)
+		if isTombstone(key) {
+			delete(keyToLease, rkv.kstr)
+		} else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease {
+			keyToLease[rkv.kstr] = lid
+		} else {
+			delete(keyToLease, rkv.kstr)
+		}
+		kvc <- rkv
+	}
+}
+
+func (s *store) Close() error {
+	close(s.stopc)
+	s.fifoSched.Stop()
+	return nil
+}
+
+func (a *store) Equal(b *store) bool {
+	if a.currentRev != b.currentRev {
+		return false
+	}
+	if a.compactMainRev != b.compactMainRev {
+		return false
+	}
+	return a.kvindex.Equal(b.kvindex)
+}
+
+func (s *store) saveIndex(tx backend.BatchTx) {
+	if s.ig == nil {
+		return
+	}
+	bs := s.bytesBuf8
+	binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex())
+	// put the index into the underlying backend
+	// tx has been locked in TxnBegin, so there is no need to lock it again
+	tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
+}
+
+func (s *store) ConsistentIndex() uint64 {
+	// TODO: cache the index in a uint64 field?
+	tx := s.b.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+	_, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
+	if len(vs) == 0 {
+		return 0
+	}
+	return binary.BigEndian.Uint64(vs[0])
+}
+
+// appendMarkTombstone appends the tombstone mark to normal revision bytes.
+func appendMarkTombstone(b []byte) []byte {
+	if len(b) != revBytesLen {
+		plog.Panicf("cannot append mark to non normal revision bytes")
+	}
+	return append(b, markTombstone)
+}
+
+// isTombstone checks whether the revision bytes are a tombstone.
+func isTombstone(b []byte) bool {
+	return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
+}
+
+// revBytesRange returns the range of revision bytes at
+// the given revision.
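+//
+// For example, revBytesRange(revision{main: 5}) returns the half-open
+// byte range [rev{5,0}, rev{5,1}), which covers exactly that revision's
+// stored key.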
+func revBytesRange(rev revision) (start, end []byte) { + start = newRevBytes() + revToBytes(rev, start) + + end = newRevBytes() + endRev := revision{main: rev.main, sub: rev.sub + 1} + revToBytes(endRev, end) + + return start, end +} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go new file mode 100644 index 000000000000..bbd38f547f3f --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go @@ -0,0 +1,66 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "encoding/binary" + "time" +) + +func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool { + totalStart := time.Now() + defer dbCompactionTotalDurations.Observe(float64(time.Since(totalStart) / time.Millisecond)) + + end := make([]byte, 8) + binary.BigEndian.PutUint64(end, uint64(compactMainRev+1)) + + batchsize := int64(10000) + last := make([]byte, 8+1+8) + for { + var rev revision + + start := time.Now() + tx := s.b.BatchTx() + tx.Lock() + + keys, _ := tx.UnsafeRange(keyBucketName, last, end, batchsize) + for _, key := range keys { + rev = bytesToRev(key) + if _, ok := keep[rev]; !ok { + tx.UnsafeDelete(keyBucketName, key) + } + } + + if len(keys) < int(batchsize) { + rbytes := make([]byte, 8+1+8) + revToBytes(revision{main: compactMainRev}, rbytes) + tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes) + tx.Unlock() + plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart)) + return true + } + + // update last + revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last) + tx.Unlock() + dbCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond)) + + select { + case <-time.After(100 * time.Millisecond): + case <-s.stopc: + return false + } + } +} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go new file mode 100644 index 000000000000..13d4d530d0ae --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go @@ -0,0 +1,253 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
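+
+// storeTxnRead and storeTxnWrite implement TxnRead and TxnWrite on top of
+// the backend's read and batch transactions; the store's mu is held in
+// read mode for the lifetime of either txn.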
+ +package mvcc + +import ( + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" +) + +type storeTxnRead struct { + s *store + tx backend.ReadTx + + firstRev int64 + rev int64 +} + +func (s *store) Read() TxnRead { + s.mu.RLock() + tx := s.b.ReadTx() + s.revMu.RLock() + tx.Lock() + firstRev, rev := s.compactMainRev, s.currentRev + s.revMu.RUnlock() + return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev}) +} + +func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } +func (tr *storeTxnRead) Rev() int64 { return tr.rev } + +func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + return tr.rangeKeys(key, end, tr.Rev(), ro) +} + +func (tr *storeTxnRead) End() { + tr.tx.Unlock() + tr.s.mu.RUnlock() +} + +type storeTxnWrite struct { + *storeTxnRead + tx backend.BatchTx + // beginRev is the revision where the txn begins; it will write to the next revision. + beginRev int64 + changes []mvccpb.KeyValue +} + +func (s *store) Write() TxnWrite { + s.mu.RLock() + tx := s.b.BatchTx() + tx.Lock() + tw := &storeTxnWrite{ + storeTxnRead: &storeTxnRead{s, tx, 0, 0}, + tx: tx, + beginRev: s.currentRev, + changes: make([]mvccpb.KeyValue, 0, 4), + } + return newMetricsTxnWrite(tw) +} + +func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev } + +func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + rev := tw.beginRev + if len(tw.changes) > 0 { + rev++ + } + return tw.rangeKeys(key, end, rev, ro) +} + +func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { + if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { + return n, int64(tw.beginRev + 1) + } + return 0, int64(tw.beginRev) +} + +func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { + tw.put(key, value, lease) + return int64(tw.beginRev + 1) +} + +func (tw *storeTxnWrite) End() { + // only update index if the txn modifies the mvcc state. + if len(tw.changes) != 0 { + tw.s.saveIndex(tw.tx) + // hold revMu lock to prevent new read txns from opening until writeback. 
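+		// revMu is released only after tw.tx.Unlock() below, so a new read
+		// txn can never observe the bumped currentRev before the backend
+		// holds the corresponding data.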
+		tw.s.revMu.Lock()
+		tw.s.currentRev++
+	}
+	tw.tx.Unlock()
+	if len(tw.changes) != 0 {
+		tw.s.revMu.Unlock()
+	}
+	tw.s.mu.RUnlock()
+}
+
+func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) {
+	rev := ro.Rev
+	if rev > curRev {
+		return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev
+	}
+	if rev <= 0 {
+		rev = curRev
+	}
+	if rev < tr.s.compactMainRev {
+		return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted
+	}
+
+	_, revpairs := tr.s.kvindex.Range(key, end, int64(rev))
+	if len(revpairs) == 0 {
+		return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil
+	}
+	if ro.Count {
+		return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil
+	}
+
+	var kvs []mvccpb.KeyValue
+	for _, revpair := range revpairs {
+		start, end := revBytesRange(revpair)
+		_, vs := tr.tx.UnsafeRange(keyBucketName, start, end, 0)
+		if len(vs) != 1 {
+			plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub)
+		}
+
+		var kv mvccpb.KeyValue
+		if err := kv.Unmarshal(vs[0]); err != nil {
+			plog.Fatalf("cannot unmarshal event: %v", err)
+		}
+		kvs = append(kvs, kv)
+		if ro.Limit > 0 && len(kvs) >= int(ro.Limit) {
+			break
+		}
+	}
+	return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil
+}
+
+func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
+	rev := tw.beginRev + 1
+	c := rev
+	oldLease := lease.NoLease
+
+	// if the key existed before, use its previous create revision and
+	// fetch its previous leaseID
+	_, created, ver, err := tw.s.kvindex.Get(key, rev)
+	if err == nil {
+		c = created.main
+		oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)})
+	}
+
+	ibytes := newRevBytes()
+	idxRev := revision{main: rev, sub: int64(len(tw.changes))}
+	revToBytes(idxRev, ibytes)
+
+	ver = ver + 1
+	kv := mvccpb.KeyValue{
+		Key:            key,
+		Value:          value,
+		CreateRevision: c,
+		ModRevision:    rev,
+		Version:        ver,
+		Lease:          int64(leaseID),
+	}
+
+	d, err := kv.Marshal()
+	if err != nil {
+		plog.Fatalf("cannot marshal event: %v", err)
+	}
+
+	tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
+	tw.s.kvindex.Put(key, idxRev)
+	tw.changes = append(tw.changes, kv)
+
+	if oldLease != lease.NoLease {
+		if tw.s.le == nil {
+			panic("no lessor to detach lease")
+		}
+		err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}})
+		if err != nil {
+			plog.Errorf("unexpected error from lease detach: %v", err)
+		}
+	}
+	if leaseID != lease.NoLease {
+		if tw.s.le == nil {
+			panic("no lessor to attach lease")
+		}
+		err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}})
+		if err != nil {
+			panic("unexpected error from lease Attach")
+		}
+	}
+}
+
+func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 {
+	rrev := tw.beginRev
+	if len(tw.changes) > 0 {
+		rrev += 1
+	}
+	keys, revs := tw.s.kvindex.Range(key, end, rrev)
+	if len(keys) == 0 {
+		return 0
+	}
+	for i, key := range keys {
+		tw.delete(key, revs[i])
+	}
+	return int64(len(keys))
+}
+
+func (tw *storeTxnWrite) delete(key []byte, rev revision) {
+	ibytes := newRevBytes()
+	idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))}
+	revToBytes(idxRev, ibytes)
+	ibytes = appendMarkTombstone(ibytes)
+
+	kv := mvccpb.KeyValue{Key: key}
+
+	d, err := kv.Marshal()
+	if err != nil {
+		plog.Fatalf("cannot marshal event: %v", err)
+	}
+
+	tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
+	err = tw.s.kvindex.Tombstone(key, idxRev)
+	if err != nil {
+		plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err)
+	}
+	tw.changes =
append(tw.changes, kv) + + item := lease.LeaseItem{Key: string(key)} + leaseID := tw.s.le.GetLease(item) + + if leaseID != lease.NoLease { + err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) + if err != nil { + plog.Errorf("cannot detach %v", err) + } + } +} + +func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes } diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go new file mode 100644 index 000000000000..a65fe59b996d --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/metrics.go @@ -0,0 +1,174 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + rangeCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "range_total", + Help: "Total number of ranges seen by this member.", + }) + + putCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "put_total", + Help: "Total number of puts seen by this member.", + }) + + deleteCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "delete_total", + Help: "Total number of deletes seen by this member.", + }) + + txnCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "txn_total", + Help: "Total number of txns seen by this member.", + }) + + keysGauge = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "keys_total", + Help: "Total number of keys.", + }) + + watchStreamGauge = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "watch_stream_total", + Help: "Total number of watch streams.", + }) + + watcherGauge = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "watcher_total", + Help: "Total number of watchers.", + }) + + slowWatcherGauge = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "slow_watcher_total", + Help: "Total number of unsynced slow watchers.", + }) + + totalEventsCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "events_total", + Help: "Total number of events sent by this member.", + }) + + pendingEventsGauge = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "pending_events_total", + Help: "Total number of pending events to be sent.", + }) + + indexCompactionPauseDurations = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "index_compaction_pause_duration_milliseconds", + Help: "Bucketed histogram of index compaction 
pause duration.", + // 0.5ms -> 1second + Buckets: prometheus.ExponentialBuckets(0.5, 2, 12), + }) + + dbCompactionPauseDurations = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "db_compaction_pause_duration_milliseconds", + Help: "Bucketed histogram of db compaction pause duration.", + // 1ms -> 4second + Buckets: prometheus.ExponentialBuckets(1, 2, 13), + }) + + dbCompactionTotalDurations = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "db_compaction_total_duration_milliseconds", + Help: "Bucketed histogram of db compaction total duration.", + // 100ms -> 800second + Buckets: prometheus.ExponentialBuckets(100, 2, 14), + }) + + dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "db_total_size_in_bytes", + Help: "Total size of the underlying database in bytes.", + }, + func() float64 { + reportDbTotalSizeInBytesMu.RLock() + defer reportDbTotalSizeInBytesMu.RUnlock() + return reportDbTotalSizeInBytes() + }, + ) + // overridden by mvcc initialization + reportDbTotalSizeInBytesMu sync.RWMutex + reportDbTotalSizeInBytes func() float64 = func() float64 { return 0 } +) + +func init() { + prometheus.MustRegister(rangeCounter) + prometheus.MustRegister(putCounter) + prometheus.MustRegister(deleteCounter) + prometheus.MustRegister(txnCounter) + prometheus.MustRegister(keysGauge) + prometheus.MustRegister(watchStreamGauge) + prometheus.MustRegister(watcherGauge) + prometheus.MustRegister(slowWatcherGauge) + prometheus.MustRegister(totalEventsCounter) + prometheus.MustRegister(pendingEventsGauge) + prometheus.MustRegister(indexCompactionPauseDurations) + prometheus.MustRegister(dbCompactionPauseDurations) + prometheus.MustRegister(dbCompactionTotalDurations) + prometheus.MustRegister(dbTotalSize) +} + +// ReportEventReceived reports that an event is received. +// This function should be called when the external systems received an +// event from mvcc.Watcher. +func ReportEventReceived(n int) { + pendingEventsGauge.Sub(float64(n)) + totalEventsCounter.Add(float64(n)) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go new file mode 100644 index 000000000000..fd2144279aea --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go @@ -0,0 +1,67 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
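+
+// metricsTxnWrite wraps a txn and, when the txn ends, increments exactly
+// one of the range/put/delete counters for a single-operation txn, or the
+// txn counter for a compound txn.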
+ +package mvcc + +import ( + "github.com/coreos/etcd/lease" +) + +type metricsTxnWrite struct { + TxnWrite + ranges uint + puts uint + deletes uint +} + +func newMetricsTxnRead(tr TxnRead) TxnRead { + return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0} +} + +func newMetricsTxnWrite(tw TxnWrite) TxnWrite { + return &metricsTxnWrite{tw, 0, 0, 0} +} + +func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) { + tw.ranges++ + return tw.TxnWrite.Range(key, end, ro) +} + +func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) { + tw.deletes++ + return tw.TxnWrite.DeleteRange(key, end) +} + +func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + tw.puts++ + return tw.TxnWrite.Put(key, value, lease) +} + +func (tw *metricsTxnWrite) End() { + defer tw.TxnWrite.End() + if sum := tw.ranges + tw.puts + tw.deletes; sum != 1 { + if sum > 1 { + txnCounter.Inc() + } + return + } + switch { + case tw.ranges == 1: + rangeCounter.Inc() + case tw.puts == 1: + putCounter.Inc() + case tw.deletes == 1: + deleteCounter.Inc() + } +} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/BUILD b/vendor/github.com/coreos/etcd/mvcc/mvccpb/BUILD new file mode 100644 index 000000000000..7f5e3a8e5f09 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["kv.pb.go"], + importpath = "github.com/coreos/etcd/mvcc/mvccpb", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go new file mode 100644 index 000000000000..7033f1326626 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go @@ -0,0 +1,735 @@ +// Code generated by protoc-gen-gogo. +// source: kv.proto +// DO NOT EDIT! + +/* + Package mvccpb is a generated protocol buffer package. + + It is generated from these files: + kv.proto + + It has these top-level messages: + KeyValue + Event +*/ +package mvccpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Event_EventType int32 + +const ( + PUT Event_EventType = 0 + DELETE Event_EventType = 1 +) + +var Event_EventType_name = map[int32]string{ + 0: "PUT", + 1: "DELETE", +} +var Event_EventType_value = map[string]int32{ + "PUT": 0, + "DELETE": 1, +} + +func (x Event_EventType) String() string { + return proto.EnumName(Event_EventType_name, int32(x)) +} +func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} } + +type KeyValue struct { + // key is the key in bytes. An empty key is not allowed. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // create_revision is the revision of last creation on this key. + CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"` + // mod_revision is the revision of last modification on this key. + ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"` + // version is the version of the key. A deletion resets + // the version to zero and any modification of the key + // increases its version. + Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + // value is the value held by the key, in bytes. + Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` + // lease is the ID of the lease that attached to key. + // When the attached lease expires, the key will be deleted. + // If lease is 0, then no lease is attached to the key. + Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} } + +type Event struct { + // type is the kind of event. If type is a PUT, it indicates + // new data has been stored to the key. If type is a DELETE, + // it indicates the key was deleted. + Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"` + // kv holds the KeyValue for the event. + // A PUT event contains current kv pair. + // A PUT event with kv.Version=1 indicates the creation of a key. + // A DELETE/EXPIRE event contains the deleted key with + // its modification revision set to the revision of deletion. + Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` + // prev_kv holds the key-value pair before the event happens. 
+ PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} } + +func init() { + proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") + proto.RegisterType((*Event)(nil), "mvccpb.Event") + proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) +} +func (m *KeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.CreateRevision != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) + } + if m.ModRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) + } + if m.Version != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Version)) + } + if len(m.Value) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.Lease != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Lease)) + } + return i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Type)) + } + if m.Kv != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size())) + n1, err := m.Kv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.PrevKv != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size())) + n2, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeFixed64Kv(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Kv(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintKv(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *KeyValue) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + if m.CreateRevision != 0 { + n += 1 + sovKv(uint64(m.CreateRevision)) + } + if m.ModRevision != 0 { + n += 1 + sovKv(uint64(m.ModRevision)) + } + if m.Version != 0 { + n += 1 + sovKv(uint64(m.Version)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovKv(uint64(m.Lease)) + } + return n +} + 
+func (m *Event) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovKv(uint64(m.Type)) + } + if m.Kv != nil { + l = m.Kv.Size() + n += 1 + l + sovKv(uint64(l)) + } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovKv(uint64(l)) + } + return n +} + +func sovKv(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozKv(x uint64) (n int) { + return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *KeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) + } + m.CreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) + } + m.ModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Event_EventType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kv == nil { + m.Kv = &KeyValue{} + } + if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &KeyValue{} + } + if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipKv(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthKv + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipKv(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } + +var fileDescriptorKv = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, + 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, + 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, + 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, + 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, + 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, + 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, + 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, + 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, + 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, + 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, + 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, + 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, + 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, + 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, + 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, + 0xd6, 0xbe, 0x94, 0xec, 
0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, + 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto new file mode 100644 index 000000000000..23c911b7da88 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; +package mvccpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +message KeyValue { + // key is the key in bytes. An empty key is not allowed. + bytes key = 1; + // create_revision is the revision of last creation on this key. + int64 create_revision = 2; + // mod_revision is the revision of last modification on this key. + int64 mod_revision = 3; + // version is the version of the key. A deletion resets + // the version to zero and any modification of the key + // increases its version. + int64 version = 4; + // value is the value held by the key, in bytes. + bytes value = 5; + // lease is the ID of the lease that attached to key. + // When the attached lease expires, the key will be deleted. + // If lease is 0, then no lease is attached to the key. + int64 lease = 6; +} + +message Event { + enum EventType { + PUT = 0; + DELETE = 1; + } + // type is the kind of event. If type is a PUT, it indicates + // new data has been stored to the key. If type is a DELETE, + // it indicates the key was deleted. + EventType type = 1; + // kv holds the KeyValue for the event. + // A PUT event contains current kv pair. + // A PUT event with kv.Version=1 indicates the creation of a key. + // A DELETE/EXPIRE event contains the deleted key with + // its modification revision set to the revision of deletion. + KeyValue kv = 2; + + // prev_kv holds the key-value pair before the event happens. + KeyValue prev_kv = 3; +} diff --git a/vendor/github.com/coreos/etcd/mvcc/revision.go b/vendor/github.com/coreos/etcd/mvcc/revision.go new file mode 100644 index 000000000000..5fa35a1c2a26 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/revision.go @@ -0,0 +1,67 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import "encoding/binary" + +// revBytesLen is the byte length of a normal revision. +// First 8 bytes is the revision.main in big-endian format. The 9th byte +// is a '_'. The last 8 bytes is the revision.sub in big-endian format. +const revBytesLen = 8 + 1 + 8 + +// A revision indicates modification of the key-value space. +// The set of changes that share same main revision changes the key-value space atomically. +type revision struct { + // main is the main revision of a set of changes that happen atomically. 
+	main int64
+
+	// sub is the sub revision of a change in a set of changes that happen
+	// atomically. Each change has a different, increasing sub revision within
+	// that set.
+	sub int64
+}
+
+func (a revision) GreaterThan(b revision) bool {
+	if a.main > b.main {
+		return true
+	}
+	if a.main < b.main {
+		return false
+	}
+	return a.sub > b.sub
+}
+
+func newRevBytes() []byte {
+	return make([]byte, revBytesLen, markedRevBytesLen)
+}
+
+func revToBytes(rev revision, bytes []byte) {
+	binary.BigEndian.PutUint64(bytes, uint64(rev.main))
+	bytes[8] = '_'
+	binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub))
+}
+
+func bytesToRev(bytes []byte) revision {
+	return revision{
+		main: int64(binary.BigEndian.Uint64(bytes[0:8])),
+		sub:  int64(binary.BigEndian.Uint64(bytes[9:])),
+	}
+}
+
+type revisions []revision
+
+func (a revisions) Len() int           { return len(a) }
+func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) }
+func (a revisions) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
diff --git a/vendor/github.com/coreos/etcd/mvcc/util.go b/vendor/github.com/coreos/etcd/mvcc/util.go
new file mode 100644
index 000000000000..8a0df0bfcc37
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/util.go
@@ -0,0 +1,56 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+	"encoding/binary"
+
+	"github.com/coreos/etcd/mvcc/backend"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+func UpdateConsistentIndex(be backend.Backend, index uint64) {
+	tx := be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	var oldi uint64
+	_, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
+	if len(vs) != 0 {
+		oldi = binary.BigEndian.Uint64(vs[0])
+	}
+
+	if index <= oldi {
+		return
+	}
+
+	bs := make([]byte, 8)
+	binary.BigEndian.PutUint64(bs, index)
+	tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
+}
+
+func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
+	ibytes := newRevBytes()
+	revToBytes(revision{main: kv.ModRevision}, ibytes)
+
+	d, err := kv.Marshal()
+	if err != nil {
+		plog.Fatalf("cannot marshal event: %v", err)
+	}
+
+	be.BatchTx().Lock()
+	be.BatchTx().UnsafePut(keyBucketName, ibytes, d)
+	be.BatchTx().Unlock()
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go
new file mode 100644
index 000000000000..68d9ab71d271
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go
@@ -0,0 +1,522 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "sync"
+ "time"
+
+ "github.com/coreos/etcd/lease"
+ "github.com/coreos/etcd/mvcc/backend"
+ "github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+const (
+ // chanBufLen is the length of the buffered chan
+ // for sending out watched events.
+ // TODO: find a good buf value. 1024 is just a random one that
+ // seems to be reasonable.
+ chanBufLen = 1024
+
+ // maxWatchersPerSync is the number of watchers to sync in a single batch
+ maxWatchersPerSync = 512
+)
+
+type watchable interface {
+ watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
+ progress(w *watcher)
+ rev() int64
+}
+
+type watchableStore struct {
+ *store
+
+ // mu protects watcher groups and batches. It should never be locked
+ // before locking store.mu to avoid deadlock.
+ mu sync.RWMutex
+
+ // victims are watcher batches that were blocked on the watch channel
+ victims []watcherBatch
+ victimc chan struct{}
+
+ // contains all unsynced watchers that need to sync with events that have happened
+ unsynced watcherGroup
+
+ // contains all synced watchers that are in sync with the progress of the store.
+ // The key of the map is the key that the watcher watches on.
+ synced watcherGroup
+
+ stopc chan struct{}
+ wg sync.WaitGroup
+}
+
+// cancelFunc updates unsynced and synced maps when running
+// cancel operations.
+type cancelFunc func() + +func New(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV { + return newWatchableStore(b, le, ig) +} + +func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore { + s := &watchableStore{ + store: NewStore(b, le, ig), + victimc: make(chan struct{}, 1), + unsynced: newWatcherGroup(), + synced: newWatcherGroup(), + stopc: make(chan struct{}), + } + s.store.ReadView = &readView{s} + s.store.WriteView = &writeView{s} + if s.le != nil { + // use this store as the deleter so revokes trigger watch events + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) + } + s.wg.Add(2) + go s.syncWatchersLoop() + go s.syncVictimsLoop() + return s +} + +func (s *watchableStore) Close() error { + close(s.stopc) + s.wg.Wait() + return s.store.Close() +} + +func (s *watchableStore) NewWatchStream() WatchStream { + watchStreamGauge.Inc() + return &watchStream{ + watchable: s, + ch: make(chan WatchResponse, chanBufLen), + cancels: make(map[WatchID]cancelFunc), + watchers: make(map[WatchID]*watcher), + } +} + +func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) { + wa := &watcher{ + key: key, + end: end, + minRev: startRev, + id: id, + ch: ch, + fcs: fcs, + } + + s.mu.Lock() + s.revMu.RLock() + synced := startRev > s.store.currentRev || startRev == 0 + if synced { + wa.minRev = s.store.currentRev + 1 + if startRev > wa.minRev { + wa.minRev = startRev + } + } + if synced { + s.synced.add(wa) + } else { + slowWatcherGauge.Inc() + s.unsynced.add(wa) + } + s.revMu.RUnlock() + s.mu.Unlock() + + watcherGauge.Inc() + + return wa, func() { s.cancelWatcher(wa) } +} + +// cancelWatcher removes references of the watcher from the watchableStore +func (s *watchableStore) cancelWatcher(wa *watcher) { + for { + s.mu.Lock() + + if s.unsynced.delete(wa) { + slowWatcherGauge.Dec() + break + } else if s.synced.delete(wa) { + break + } else if wa.compacted { + break + } + + if !wa.victim { + panic("watcher not victim but not in watch groups") + } + + var victimBatch watcherBatch + for _, wb := range s.victims { + if wb[wa] != nil { + victimBatch = wb + break + } + } + if victimBatch != nil { + slowWatcherGauge.Dec() + delete(victimBatch, wa) + break + } + + // victim being processed so not accessible; retry + s.mu.Unlock() + time.Sleep(time.Millisecond) + } + + watcherGauge.Dec() + s.mu.Unlock() +} + +func (s *watchableStore) Restore(b backend.Backend) error { + s.mu.Lock() + defer s.mu.Unlock() + err := s.store.Restore(b) + if err != nil { + return err + } + + for wa := range s.synced.watchers { + s.unsynced.watchers.add(wa) + } + s.synced = newWatcherGroup() + return nil +} + +// syncWatchersLoop syncs the watcher in the unsynced map every 100ms. +func (s *watchableStore) syncWatchersLoop() { + defer s.wg.Done() + + for { + s.mu.RLock() + st := time.Now() + lastUnsyncedWatchers := s.unsynced.size() + s.mu.RUnlock() + + unsyncedWatchers := 0 + if lastUnsyncedWatchers > 0 { + unsyncedWatchers = s.syncWatchers() + } + syncDuration := time.Since(st) + + waitDuration := 100 * time.Millisecond + // more work pending? 
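The synced/unsynced split in watch() above determines where a new watcher starts reading. A hedged sketch of just that decision, with illustrative revision numbers:

package main

import "fmt"

// minRevFor mirrors the decision in watch() above: a watcher starting at
// revision 0 or beyond the current revision is synced and begins at
// currentRev+1 (or startRev, if later); otherwise it must replay history
// from startRev as an unsynced watcher.
func minRevFor(startRev, currentRev int64) (minRev int64, synced bool) {
    synced = startRev > currentRev || startRev == 0
    minRev = startRev
    if synced {
        minRev = currentRev + 1
        if startRev > minRev {
            minRev = startRev
        }
    }
    return minRev, synced
}

func main() {
    fmt.Println(minRevFor(0, 10))  // 11 true: watch from "now"
    fmt.Println(minRevFor(15, 10)) // 15 true: future start revision
    fmt.Println(minRevFor(5, 10))  // 5 false: must catch up on history
}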
+ if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers { + // be fair to other store operations by yielding time taken + waitDuration = syncDuration + } + + select { + case <-time.After(waitDuration): + case <-s.stopc: + return + } + } +} + +// syncVictimsLoop tries to write precomputed watcher responses to +// watchers that had a blocked watcher channel +func (s *watchableStore) syncVictimsLoop() { + defer s.wg.Done() + + for { + for s.moveVictims() != 0 { + // try to update all victim watchers + } + s.mu.RLock() + isEmpty := len(s.victims) == 0 + s.mu.RUnlock() + + var tickc <-chan time.Time + if !isEmpty { + tickc = time.After(10 * time.Millisecond) + } + + select { + case <-tickc: + case <-s.victimc: + case <-s.stopc: + return + } + } +} + +// moveVictims tries to update watches with already pending event data +func (s *watchableStore) moveVictims() (moved int) { + s.mu.Lock() + victims := s.victims + s.victims = nil + s.mu.Unlock() + + var newVictim watcherBatch + for _, wb := range victims { + // try to send responses again + for w, eb := range wb { + // watcher has observed the store up to, but not including, w.minRev + rev := w.minRev - 1 + if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { + pendingEventsGauge.Add(float64(len(eb.evs))) + } else { + if newVictim == nil { + newVictim = make(watcherBatch) + } + newVictim[w] = eb + continue + } + moved++ + } + + // assign completed victim watchers to unsync/sync + s.mu.Lock() + s.store.revMu.RLock() + curRev := s.store.currentRev + for w, eb := range wb { + if newVictim != nil && newVictim[w] != nil { + // couldn't send watch response; stays victim + continue + } + w.victim = false + if eb.moreRev != 0 { + w.minRev = eb.moreRev + } + if w.minRev <= curRev { + s.unsynced.add(w) + } else { + slowWatcherGauge.Dec() + s.synced.add(w) + } + } + s.store.revMu.RUnlock() + s.mu.Unlock() + } + + if len(newVictim) > 0 { + s.mu.Lock() + s.victims = append(s.victims, newVictim) + s.mu.Unlock() + } + + return moved +} + +// syncWatchers syncs unsynced watchers by: +// 1. choose a set of watchers from the unsynced watcher group +// 2. iterate over the set to get the minimum revision and remove compacted watchers +// 3. use minimum revision to get all key-value pairs and send those events to watchers +// 4. remove synced watchers in set from unsynced group and move to synced group +func (s *watchableStore) syncWatchers() int { + s.mu.Lock() + defer s.mu.Unlock() + + if s.unsynced.size() == 0 { + return 0 + } + + s.store.revMu.RLock() + defer s.store.revMu.RUnlock() + + // in order to find key-value pairs from unsynced watchers, we need to + // find min revision index, and these revisions can be used to + // query the backend store of key-value pairs + curRev := s.store.currentRev + compactionRev := s.store.compactMainRev + + wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) + minBytes, maxBytes := newRevBytes(), newRevBytes() + revToBytes(revision{main: minRev}, minBytes) + revToBytes(revision{main: curRev + 1}, maxBytes) + + // UnsafeRange returns keys and values. And in boltdb, keys are revisions. + // values are actual key-value pairs in backend. 
+ tx := s.store.b.ReadTx() + tx.Lock() + revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0) + evs := kvsToEvents(wg, revs, vs) + tx.Unlock() + + var victims watcherBatch + wb := newWatcherBatch(wg, evs) + for w := range wg.watchers { + w.minRev = curRev + 1 + + eb, ok := wb[w] + if !ok { + // bring un-notified watcher to synced + s.synced.add(w) + s.unsynced.delete(w) + continue + } + + if eb.moreRev != 0 { + w.minRev = eb.moreRev + } + + if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) { + pendingEventsGauge.Add(float64(len(eb.evs))) + } else { + if victims == nil { + victims = make(watcherBatch) + } + w.victim = true + } + + if w.victim { + victims[w] = eb + } else { + if eb.moreRev != 0 { + // stay unsynced; more to read + continue + } + s.synced.add(w) + } + s.unsynced.delete(w) + } + s.addVictim(victims) + + vsz := 0 + for _, v := range s.victims { + vsz += len(v) + } + slowWatcherGauge.Set(float64(s.unsynced.size() + vsz)) + + return s.unsynced.size() +} + +// kvsToEvents gets all events for the watchers from all key-value pairs +func kvsToEvents(wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) { + for i, v := range vals { + var kv mvccpb.KeyValue + if err := kv.Unmarshal(v); err != nil { + plog.Panicf("cannot unmarshal event: %v", err) + } + + if !wg.contains(string(kv.Key)) { + continue + } + + ty := mvccpb.PUT + if isTombstone(revs[i]) { + ty = mvccpb.DELETE + // patch in mod revision so watchers won't skip + kv.ModRevision = bytesToRev(revs[i]).main + } + evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty}) + } + return evs +} + +// notify notifies the fact that given event at the given rev just happened to +// watchers that watch on the key of the event. +func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { + var victim watcherBatch + for w, eb := range newWatcherBatch(&s.synced, evs) { + if eb.revs != 1 { + plog.Panicf("unexpected multiple revisions in notification") + } + + if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { + pendingEventsGauge.Add(float64(len(eb.evs))) + } else { + // move slow watcher to victims + w.minRev = rev + 1 + if victim == nil { + victim = make(watcherBatch) + } + w.victim = true + victim[w] = eb + s.synced.delete(w) + slowWatcherGauge.Inc() + } + } + s.addVictim(victim) +} + +func (s *watchableStore) addVictim(victim watcherBatch) { + if victim == nil { + return + } + s.victims = append(s.victims, victim) + select { + case s.victimc <- struct{}{}: + default: + } +} + +func (s *watchableStore) rev() int64 { return s.store.Rev() } + +func (s *watchableStore) progress(w *watcher) { + s.mu.RLock() + defer s.mu.RUnlock() + + if _, ok := s.synced.watchers[w]; ok { + w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) + // If the ch is full, this watcher is receiving events. + // We do not need to send progress at all. + } +} + +type watcher struct { + // the watcher key + key []byte + // end indicates the end of the range to watch. + // If end is set, the watcher is on a range. + end []byte + + // victim is set when ch is blocked and undergoing victim processing + victim bool + + // compacted is set when the watcher is removed because of compaction + compacted bool + + // minRev is the minimum revision update the watcher will accept + minRev int64 + id WatchID + + fcs []FilterFunc + // a chan to send out the watch response. + // The chan might be shared with other watchers. 
+ ch chan<- WatchResponse +} + +func (w *watcher) send(wr WatchResponse) bool { + progressEvent := len(wr.Events) == 0 + + if len(w.fcs) != 0 { + ne := make([]mvccpb.Event, 0, len(wr.Events)) + for i := range wr.Events { + filtered := false + for _, filter := range w.fcs { + if filter(wr.Events[i]) { + filtered = true + break + } + } + if !filtered { + ne = append(ne, wr.Events[i]) + } + } + wr.Events = ne + } + + // if all events are filtered out, we should send nothing. + if !progressEvent && len(wr.Events) == 0 { + return true + } + select { + case w.ch <- wr: + return true + default: + return false + } +} diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go new file mode 100644 index 000000000000..5c5bfda13413 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go @@ -0,0 +1,53 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "github.com/coreos/etcd/mvcc/mvccpb" +) + +func (tw *watchableStoreTxnWrite) End() { + changes := tw.Changes() + if len(changes) == 0 { + tw.TxnWrite.End() + return + } + + rev := tw.Rev() + 1 + evs := make([]mvccpb.Event, len(changes)) + for i, change := range changes { + evs[i].Kv = &changes[i] + if change.CreateRevision == 0 { + evs[i].Type = mvccpb.DELETE + evs[i].Kv.ModRevision = rev + } else { + evs[i].Type = mvccpb.PUT + } + } + + // end write txn under watchable store lock so the updates are visible + // when asynchronous event posting checks the current store revision + tw.s.mu.Lock() + tw.s.notify(rev, evs) + tw.TxnWrite.End() + tw.s.mu.Unlock() +} + +type watchableStoreTxnWrite struct { + TxnWrite + s *watchableStore +} + +func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} } diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher.go b/vendor/github.com/coreos/etcd/mvcc/watcher.go new file mode 100644 index 000000000000..9468d42695ca --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/watcher.go @@ -0,0 +1,171 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
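watcher.send above drops any event that one of the watcher's FilterFuncs claims. A standalone sketch of that contract, using local stand-in types rather than the mvccpb ones:

package main

import "fmt"

// Event stands in for mvccpb.Event; FilterFunc matches the contract used
// by watcher.send above: returning true filters the event out.
type Event struct{ Type string }
type FilterFunc func(Event) bool

// applyFilters mirrors the loop in watcher.send: an event is kept only
// if no filter claims it.
func applyFilters(evs []Event, fcs ...FilterFunc) []Event {
    ne := make([]Event, 0, len(evs))
    for _, ev := range evs {
        filtered := false
        for _, f := range fcs {
            if f(ev) {
                filtered = true
                break
            }
        }
        if !filtered {
            ne = append(ne, ev)
        }
    }
    return ne
}

func main() {
    noDeletes := func(e Event) bool { return e.Type == "DELETE" }
    evs := []Event{{"PUT"}, {"DELETE"}, {"PUT"}}
    fmt.Println(applyFilters(evs, noDeletes)) // [{PUT} {PUT}]
}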
+
+package mvcc
+
+import (
+ "bytes"
+ "errors"
+ "sync"
+
+ "github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+var (
+ ErrWatcherNotExist = errors.New("mvcc: watcher does not exist")
+)
+
+type WatchID int64
+
+// FilterFunc returns true if the given event should be filtered out.
+type FilterFunc func(e mvccpb.Event) bool
+
+type WatchStream interface {
+ // Watch creates a watcher. The watcher watches the events happening or
+ // happened on the given key or range [key, end) from the given startRev.
+ //
+ // The whole event history can be watched unless compacted.
+ // If `startRev` <=0, watch observes events after currentRev.
+ //
+ // The returned `id` is the ID of this watcher. It appears as WatchID
+ // in events that are sent to the created watcher through stream channel.
+ //
+ Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID
+
+ // Chan returns a chan. All watch responses will be sent to the returned chan.
+ Chan() <-chan WatchResponse
+
+ // RequestProgress requests the progress of the watcher with given ID. The response
+ // will only be sent if the watcher is currently synced.
+ // The responses will be sent through the WatchResponse Chan attached
+ // with this stream to ensure correct ordering.
+ // The responses contain no events. The revision in the response is the progress
+ // of the watchers since the watcher is currently synced.
+ RequestProgress(id WatchID)
+
+ // Cancel cancels a watcher by giving its ID. If the watcher does not exist, an error will be
+ // returned.
+ Cancel(id WatchID) error
+
+ // Close closes Chan and releases all related resources.
+ Close()
+
+ // Rev returns the current revision of the KV the stream watches on.
+ Rev() int64
+}
+
+type WatchResponse struct {
+ // WatchID is the WatchID of the watcher this response is sent to.
+ WatchID WatchID
+
+ // Events contains all the events that need to be sent.
+ Events []mvccpb.Event
+
+ // Revision is the revision of the KV when the watchResponse is created.
+ // For a normal response, the revision should be the same as the last
+ // modified revision inside Events. For a delayed response to an unsynced
+ // watcher, the revision is greater than the last modified revision
+ // inside Events.
+ Revision int64
+
+ // CompactRevision is set when the watcher is cancelled due to compaction.
+ CompactRevision int64
+}
+
+// watchStream contains a collection of watchers that share
+// one streaming chan to send out watched events and other control events.
+type watchStream struct {
+ watchable watchable
+ ch chan WatchResponse
+
+ mu sync.Mutex // guards fields below it
+ // nextID is the ID pre-allocated for next new watcher in this stream
+ nextID WatchID
+ closed bool
+ cancels map[WatchID]cancelFunc
+ watchers map[WatchID]*watcher
+}
+
+// Watch creates a new watcher in the stream and returns its WatchID.
+// TODO: return error if ws is closed?
+func (ws *watchStream) Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID {
+ // prevent wrong range where key >= end lexicographically
+ // watch request with 'WithFromKey' has empty-byte range end
+ if len(end) != 0 && bytes.Compare(key, end) != -1 {
+ return -1
+ }
+
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+ if ws.closed {
+ return -1
+ }
+
+ id := ws.nextID
+ ws.nextID++
+
+ w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...)
+ + ws.cancels[id] = c + ws.watchers[id] = w + return id +} + +func (ws *watchStream) Chan() <-chan WatchResponse { + return ws.ch +} + +func (ws *watchStream) Cancel(id WatchID) error { + ws.mu.Lock() + cancel, ok := ws.cancels[id] + ok = ok && !ws.closed + if ok { + delete(ws.cancels, id) + delete(ws.watchers, id) + } + ws.mu.Unlock() + if !ok { + return ErrWatcherNotExist + } + cancel() + return nil +} + +func (ws *watchStream) Close() { + ws.mu.Lock() + defer ws.mu.Unlock() + + for _, cancel := range ws.cancels { + cancel() + } + ws.closed = true + close(ws.ch) + watchStreamGauge.Dec() +} + +func (ws *watchStream) Rev() int64 { + ws.mu.Lock() + defer ws.mu.Unlock() + return ws.watchable.rev() +} + +func (ws *watchStream) RequestProgress(id WatchID) { + ws.mu.Lock() + w, ok := ws.watchers[id] + ws.mu.Unlock() + if !ok { + return + } + ws.watchable.progress(w) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go new file mode 100644 index 000000000000..6ef1d0ce8bbe --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go @@ -0,0 +1,283 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "math" + + "github.com/coreos/etcd/mvcc/mvccpb" + "github.com/coreos/etcd/pkg/adt" +) + +var ( + // watchBatchMaxRevs is the maximum distinct revisions that + // may be sent to an unsynced watcher at a time. Declared as + // var instead of const for testing purposes. + watchBatchMaxRevs = 1000 +) + +type eventBatch struct { + // evs is a batch of revision-ordered events + evs []mvccpb.Event + // revs is the minimum unique revisions observed for this batch + revs int + // moreRev is first revision with more events following this batch + moreRev int64 +} + +func (eb *eventBatch) add(ev mvccpb.Event) { + if eb.revs > watchBatchMaxRevs { + // maxed out batch size + return + } + + if len(eb.evs) == 0 { + // base case + eb.revs = 1 + eb.evs = append(eb.evs, ev) + return + } + + // revision accounting + ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision + evRev := ev.Kv.ModRevision + if evRev > ebRev { + eb.revs++ + if eb.revs > watchBatchMaxRevs { + eb.moreRev = evRev + return + } + } + + eb.evs = append(eb.evs, ev) +} + +type watcherBatch map[*watcher]*eventBatch + +func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) { + eb := wb[w] + if eb == nil { + eb = &eventBatch{} + wb[w] = eb + } + eb.add(ev) +} + +// newWatcherBatch maps watchers to their matched events. It enables quick +// events look up by watcher. 
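The revision accounting in eventBatch.add above is subtle: the cap is on distinct revisions, not on events, and moreRev records where the next batch must resume. A self-contained sketch with a tiny cap (upstream watchBatchMaxRevs is 1000):

package main

import "fmt"

// A minimal stand-in for eventBatch.add above, assuming a local event type
// that carries only a modification revision.
type event struct{ modRev int64 }

type batch struct {
    evs     []event
    revs    int   // distinct revisions seen so far
    moreRev int64 // first revision left over for a later batch
}

const maxRevs = 2 // small cap for the demo

func (b *batch) add(ev event) {
    if b.revs > maxRevs {
        return // batch already maxed out
    }
    if len(b.evs) == 0 {
        b.revs = 1
        b.evs = append(b.evs, ev)
        return
    }
    // a higher revision than the last event starts a new distinct revision
    if ev.modRev > b.evs[len(b.evs)-1].modRev {
        b.revs++
        if b.revs > maxRevs {
            b.moreRev = ev.modRev
            return
        }
    }
    b.evs = append(b.evs, ev)
}

func main() {
    var b batch
    for _, r := range []int64{4, 4, 5, 6} {
        b.add(event{r})
    }
    // revisions 4 and 5 fit; revision 6 spills into the next batch
    fmt.Println(len(b.evs), b.moreRev) // 3 6
}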
+func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch { + if len(wg.watchers) == 0 { + return nil + } + + wb := make(watcherBatch) + for _, ev := range evs { + for w := range wg.watcherSetByKey(string(ev.Kv.Key)) { + if ev.Kv.ModRevision >= w.minRev { + // don't double notify + wb.add(w, ev) + } + } + } + return wb +} + +type watcherSet map[*watcher]struct{} + +func (w watcherSet) add(wa *watcher) { + if _, ok := w[wa]; ok { + panic("add watcher twice!") + } + w[wa] = struct{}{} +} + +func (w watcherSet) union(ws watcherSet) { + for wa := range ws { + w.add(wa) + } +} + +func (w watcherSet) delete(wa *watcher) { + if _, ok := w[wa]; !ok { + panic("removing missing watcher!") + } + delete(w, wa) +} + +type watcherSetByKey map[string]watcherSet + +func (w watcherSetByKey) add(wa *watcher) { + set := w[string(wa.key)] + if set == nil { + set = make(watcherSet) + w[string(wa.key)] = set + } + set.add(wa) +} + +func (w watcherSetByKey) delete(wa *watcher) bool { + k := string(wa.key) + if v, ok := w[k]; ok { + if _, ok := v[wa]; ok { + delete(v, wa) + if len(v) == 0 { + // remove the set; nothing left + delete(w, k) + } + return true + } + } + return false +} + +// watcherGroup is a collection of watchers organized by their ranges +type watcherGroup struct { + // keyWatchers has the watchers that watch on a single key + keyWatchers watcherSetByKey + // ranges has the watchers that watch a range; it is sorted by interval + ranges adt.IntervalTree + // watchers is the set of all watchers + watchers watcherSet +} + +func newWatcherGroup() watcherGroup { + return watcherGroup{ + keyWatchers: make(watcherSetByKey), + watchers: make(watcherSet), + } +} + +// add puts a watcher in the group. +func (wg *watcherGroup) add(wa *watcher) { + wg.watchers.add(wa) + if wa.end == nil { + wg.keyWatchers.add(wa) + return + } + + // interval already registered? + ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) + if iv := wg.ranges.Find(ivl); iv != nil { + iv.Val.(watcherSet).add(wa) + return + } + + // not registered, put in interval tree + ws := make(watcherSet) + ws.add(wa) + wg.ranges.Insert(ivl, ws) +} + +// contains is whether the given key has a watcher in the group. +func (wg *watcherGroup) contains(key string) bool { + _, ok := wg.keyWatchers[key] + return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key)) +} + +// size gives the number of unique watchers in the group. +func (wg *watcherGroup) size() int { return len(wg.watchers) } + +// delete removes a watcher from the group. 
+func (wg *watcherGroup) delete(wa *watcher) bool { + if _, ok := wg.watchers[wa]; !ok { + return false + } + wg.watchers.delete(wa) + if wa.end == nil { + wg.keyWatchers.delete(wa) + return true + } + + ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) + iv := wg.ranges.Find(ivl) + if iv == nil { + return false + } + + ws := iv.Val.(watcherSet) + delete(ws, wa) + if len(ws) == 0 { + // remove interval missing watchers + if ok := wg.ranges.Delete(ivl); !ok { + panic("could not remove watcher from interval tree") + } + } + + return true +} + +// choose selects watchers from the watcher group to update +func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) { + if len(wg.watchers) < maxWatchers { + return wg, wg.chooseAll(curRev, compactRev) + } + ret := newWatcherGroup() + for w := range wg.watchers { + if maxWatchers <= 0 { + break + } + maxWatchers-- + ret.add(w) + } + return &ret, ret.chooseAll(curRev, compactRev) +} + +func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 { + minRev := int64(math.MaxInt64) + for w := range wg.watchers { + if w.minRev > curRev { + panic("watcher current revision should not exceed current revision") + } + if w.minRev < compactRev { + select { + case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}: + w.compacted = true + wg.delete(w) + default: + // retry next time + } + continue + } + if minRev > w.minRev { + minRev = w.minRev + } + } + return minRev +} + +// watcherSetByKey gets the set of watchers that receive events on the given key. +func (wg *watcherGroup) watcherSetByKey(key string) watcherSet { + wkeys := wg.keyWatchers[key] + wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key)) + + // zero-copy cases + switch { + case len(wranges) == 0: + // no need to merge ranges or copy; reuse single-key set + return wkeys + case len(wranges) == 0 && len(wkeys) == 0: + return nil + case len(wranges) == 1 && len(wkeys) == 0: + return wranges[0].Val.(watcherSet) + } + + // copy case + ret := make(watcherSet) + ret.union(wg.keyWatchers[key]) + for _, item := range wranges { + ret.union(item.Val.(watcherSet)) + } + return ret +} diff --git a/vendor/github.com/coreos/etcd/pkg/adt/BUILD b/vendor/github.com/coreos/etcd/pkg/adt/BUILD new file mode 100644 index 000000000000..b0bab438d4ee --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/adt/BUILD @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "interval_tree.go", + ], + importpath = "github.com/coreos/etcd/pkg/adt", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/adt/doc.go b/vendor/github.com/coreos/etcd/pkg/adt/doc.go new file mode 100644 index 000000000000..1a9559145b31 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/adt/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
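watcherGroup.add/contains above route single-key watchers (end == nil) to a plain map and range watchers to the interval tree, with the empty string acting as the affine "after every key" endpoint. A sketch of that routing using the adt package vendored below (keys and values are illustrative):

package main

import (
    "fmt"

    "github.com/coreos/etcd/pkg/adt"
)

func main() {
    var ranges adt.IntervalTree
    ranges.Insert(adt.NewStringAffineInterval("a", "c"), "watcher-1")
    // "" as the end means the range extends past every key ("from x on")
    ranges.Insert(adt.NewStringAffineInterval("x", ""), "watcher-2")

    for _, k := range []string{"b", "c", "z"} {
        hit := ranges.Intersects(adt.NewStringAffinePoint(k))
        fmt.Printf("key %q watched by a range: %v\n", k, hit)
    }
    // "b": true; "c": false (ranges are end-exclusive); "z": true
}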
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package adt implements useful abstract data types. +package adt diff --git a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go new file mode 100644 index 000000000000..9769771ea4fd --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go @@ -0,0 +1,590 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adt + +import ( + "bytes" + "math" +) + +// Comparable is an interface for trichotomic comparisons. +type Comparable interface { + // Compare gives the result of a 3-way comparison + // a.Compare(b) = 1 => a > b + // a.Compare(b) = 0 => a == b + // a.Compare(b) = -1 => a < b + Compare(c Comparable) int +} + +type rbcolor int + +const ( + black rbcolor = iota + red +) + +// Interval implements a Comparable interval [begin, end) +// TODO: support different sorts of intervals: (a,b), [a,b], (a, b] +type Interval struct { + Begin Comparable + End Comparable +} + +// Compare on an interval gives == if the interval overlaps. +func (ivl *Interval) Compare(c Comparable) int { + ivl2 := c.(*Interval) + ivbCmpBegin := ivl.Begin.Compare(ivl2.Begin) + ivbCmpEnd := ivl.Begin.Compare(ivl2.End) + iveCmpBegin := ivl.End.Compare(ivl2.Begin) + + // ivl is left of ivl2 + if ivbCmpBegin < 0 && iveCmpBegin <= 0 { + return -1 + } + + // iv is right of iv2 + if ivbCmpEnd >= 0 { + return 1 + } + + return 0 +} + +type intervalNode struct { + // iv is the interval-value pair entry. + iv IntervalValue + // max endpoint of all descendent nodes. 
+ max Comparable + // left and right are sorted by low endpoint of key interval + left, right *intervalNode + // parent is the direct ancestor of the node + parent *intervalNode + c rbcolor +} + +func (x *intervalNode) color() rbcolor { + if x == nil { + return black + } + return x.c +} + +func (n *intervalNode) height() int { + if n == nil { + return 0 + } + ld := n.left.height() + rd := n.right.height() + if ld < rd { + return rd + 1 + } + return ld + 1 +} + +func (x *intervalNode) min() *intervalNode { + for x.left != nil { + x = x.left + } + return x +} + +// successor is the next in-order node in the tree +func (x *intervalNode) successor() *intervalNode { + if x.right != nil { + return x.right.min() + } + y := x.parent + for y != nil && x == y.right { + x = y + y = y.parent + } + return y +} + +// updateMax updates the maximum values for a node and its ancestors +func (x *intervalNode) updateMax() { + for x != nil { + oldmax := x.max + max := x.iv.Ivl.End + if x.left != nil && x.left.max.Compare(max) > 0 { + max = x.left.max + } + if x.right != nil && x.right.max.Compare(max) > 0 { + max = x.right.max + } + if oldmax.Compare(max) == 0 { + break + } + x.max = max + x = x.parent + } +} + +type nodeVisitor func(n *intervalNode) bool + +// visit will call a node visitor on each node that overlaps the given interval +func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) bool { + if x == nil { + return true + } + v := iv.Compare(&x.iv.Ivl) + switch { + case v < 0: + if !x.left.visit(iv, nv) { + return false + } + case v > 0: + maxiv := Interval{x.iv.Ivl.Begin, x.max} + if maxiv.Compare(iv) == 0 { + if !x.left.visit(iv, nv) || !x.right.visit(iv, nv) { + return false + } + } + default: + if !x.left.visit(iv, nv) || !nv(x) || !x.right.visit(iv, nv) { + return false + } + } + return true +} + +type IntervalValue struct { + Ivl Interval + Val interface{} +} + +// IntervalTree represents a (mostly) textbook implementation of the +// "Introduction to Algorithms" (Cormen et al, 2nd ed.) chapter 13 red-black tree +// and chapter 14.3 interval tree with search supporting "stabbing queries". +type IntervalTree struct { + root *intervalNode + count int +} + +// Delete removes the node with the given interval from the tree, returning +// true if a node is in fact removed. 
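Interval.Compare above is what lets one red-black tree double as an interval tree: disjoint intervals order normally, and any overlap compares as equal. A small demonstration against the public adt API:

package main

import (
    "fmt"

    "github.com/coreos/etcd/pkg/adt"
)

func main() {
    a := adt.NewInt64Interval(1, 5) // [1,5)
    b := adt.NewInt64Interval(5, 9) // [5,9)
    c := adt.NewInt64Interval(4, 6) // [4,6)

    fmt.Println(a.Compare(&b)) // -1: [1,5) lies entirely left of [5,9)
    fmt.Println(a.Compare(&c)) //  0: the two intervals overlap
    fmt.Println(b.Compare(&a)) //  1: [5,9) lies entirely right of [1,5)
}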
+func (ivt *IntervalTree) Delete(ivl Interval) bool { + z := ivt.find(ivl) + if z == nil { + return false + } + + y := z + if z.left != nil && z.right != nil { + y = z.successor() + } + + x := y.left + if x == nil { + x = y.right + } + if x != nil { + x.parent = y.parent + } + + if y.parent == nil { + ivt.root = x + } else { + if y == y.parent.left { + y.parent.left = x + } else { + y.parent.right = x + } + y.parent.updateMax() + } + if y != z { + z.iv = y.iv + z.updateMax() + } + + if y.color() == black && x != nil { + ivt.deleteFixup(x) + } + + ivt.count-- + return true +} + +func (ivt *IntervalTree) deleteFixup(x *intervalNode) { + for x != ivt.root && x.color() == black && x.parent != nil { + if x == x.parent.left { + w := x.parent.right + if w.color() == red { + w.c = black + x.parent.c = red + ivt.rotateLeft(x.parent) + w = x.parent.right + } + if w == nil { + break + } + if w.left.color() == black && w.right.color() == black { + w.c = red + x = x.parent + } else { + if w.right.color() == black { + w.left.c = black + w.c = red + ivt.rotateRight(w) + w = x.parent.right + } + w.c = x.parent.color() + x.parent.c = black + w.right.c = black + ivt.rotateLeft(x.parent) + x = ivt.root + } + } else { + // same as above but with left and right exchanged + w := x.parent.left + if w.color() == red { + w.c = black + x.parent.c = red + ivt.rotateRight(x.parent) + w = x.parent.left + } + if w == nil { + break + } + if w.left.color() == black && w.right.color() == black { + w.c = red + x = x.parent + } else { + if w.left.color() == black { + w.right.c = black + w.c = red + ivt.rotateLeft(w) + w = x.parent.left + } + w.c = x.parent.color() + x.parent.c = black + w.left.c = black + ivt.rotateRight(x.parent) + x = ivt.root + } + } + } + if x != nil { + x.c = black + } +} + +// Insert adds a node with the given interval into the tree. 
+func (ivt *IntervalTree) Insert(ivl Interval, val interface{}) {
+ var y *intervalNode
+ z := &intervalNode{iv: IntervalValue{ivl, val}, max: ivl.End, c: red}
+ x := ivt.root
+ for x != nil {
+ y = x
+ if z.iv.Ivl.Begin.Compare(x.iv.Ivl.Begin) < 0 {
+ x = x.left
+ } else {
+ x = x.right
+ }
+ }
+
+ z.parent = y
+ if y == nil {
+ ivt.root = z
+ } else {
+ if z.iv.Ivl.Begin.Compare(y.iv.Ivl.Begin) < 0 {
+ y.left = z
+ } else {
+ y.right = z
+ }
+ y.updateMax()
+ }
+ z.c = red
+ ivt.insertFixup(z)
+ ivt.count++
+}
+
+func (ivt *IntervalTree) insertFixup(z *intervalNode) {
+ for z.parent != nil && z.parent.parent != nil && z.parent.color() == red {
+ if z.parent == z.parent.parent.left {
+ y := z.parent.parent.right
+ if y.color() == red {
+ y.c = black
+ z.parent.c = black
+ z.parent.parent.c = red
+ z = z.parent.parent
+ } else {
+ if z == z.parent.right {
+ z = z.parent
+ ivt.rotateLeft(z)
+ }
+ z.parent.c = black
+ z.parent.parent.c = red
+ ivt.rotateRight(z.parent.parent)
+ }
+ } else {
+ // same as then with left/right exchanged
+ y := z.parent.parent.left
+ if y.color() == red {
+ y.c = black
+ z.parent.c = black
+ z.parent.parent.c = red
+ z = z.parent.parent
+ } else {
+ if z == z.parent.left {
+ z = z.parent
+ ivt.rotateRight(z)
+ }
+ z.parent.c = black
+ z.parent.parent.c = red
+ ivt.rotateLeft(z.parent.parent)
+ }
+ }
+ }
+ ivt.root.c = black
+}
+
+// rotateLeft moves x so it is left of its right child
+func (ivt *IntervalTree) rotateLeft(x *intervalNode) {
+ y := x.right
+ x.right = y.left
+ if y.left != nil {
+ y.left.parent = x
+ }
+ x.updateMax()
+ ivt.replaceParent(x, y)
+ y.left = x
+ y.updateMax()
+}
+
+// rotateRight moves x so it is right of its left child
+func (ivt *IntervalTree) rotateRight(x *intervalNode) {
+ if x == nil {
+ return
+ }
+ y := x.left
+ x.left = y.right
+ if y.right != nil {
+ y.right.parent = x
+ }
+ x.updateMax()
+ ivt.replaceParent(x, y)
+ y.right = x
+ y.updateMax()
+}
+
+// replaceParent replaces x's parent with y
+func (ivt *IntervalTree) replaceParent(x *intervalNode, y *intervalNode) {
+ y.parent = x.parent
+ if x.parent == nil {
+ ivt.root = y
+ } else {
+ if x == x.parent.left {
+ x.parent.left = y
+ } else {
+ x.parent.right = y
+ }
+ x.parent.updateMax()
+ }
+ x.parent = y
+}
+
+// Len gives the number of elements in the tree
+func (ivt *IntervalTree) Len() int { return ivt.count }
+
+// Height is the number of levels in the tree; one node has height 1.
+func (ivt *IntervalTree) Height() int { return ivt.root.height() }
+
+// MaxHeight is the expected maximum tree height given the number of nodes
+func (ivt *IntervalTree) MaxHeight() int {
+ return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5)
+}
+
+// IntervalVisitor is used on tree searches; return false to stop searching.
+type IntervalVisitor func(n *IntervalValue) bool
+
+// Visit calls a visitor function on every tree node intersecting the given interval.
+// It will visit each interval [x, y) in ascending order sorted on x.
+func (ivt *IntervalTree) Visit(ivl Interval, ivv IntervalVisitor) { + ivt.root.visit(&ivl, func(n *intervalNode) bool { return ivv(&n.iv) }) +} + +// find the exact node for a given interval +func (ivt *IntervalTree) find(ivl Interval) (ret *intervalNode) { + f := func(n *intervalNode) bool { + if n.iv.Ivl != ivl { + return true + } + ret = n + return false + } + ivt.root.visit(&ivl, f) + return ret +} + +// Find gets the IntervalValue for the node matching the given interval +func (ivt *IntervalTree) Find(ivl Interval) (ret *IntervalValue) { + n := ivt.find(ivl) + if n == nil { + return nil + } + return &n.iv +} + +// Intersects returns true if there is some tree node intersecting the given interval. +func (ivt *IntervalTree) Intersects(iv Interval) bool { + x := ivt.root + for x != nil && iv.Compare(&x.iv.Ivl) != 0 { + if x.left != nil && x.left.max.Compare(iv.Begin) > 0 { + x = x.left + } else { + x = x.right + } + } + return x != nil +} + +// Contains returns true if the interval tree's keys cover the entire given interval. +func (ivt *IntervalTree) Contains(ivl Interval) bool { + var maxEnd, minBegin Comparable + + isContiguous := true + ivt.Visit(ivl, func(n *IntervalValue) bool { + if minBegin == nil { + minBegin = n.Ivl.Begin + maxEnd = n.Ivl.End + return true + } + if maxEnd.Compare(n.Ivl.Begin) < 0 { + isContiguous = false + return false + } + if n.Ivl.End.Compare(maxEnd) > 0 { + maxEnd = n.Ivl.End + } + return true + }) + + return isContiguous && minBegin != nil && maxEnd.Compare(ivl.End) >= 0 && minBegin.Compare(ivl.Begin) <= 0 +} + +// Stab returns a slice with all elements in the tree intersecting the interval. +func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) { + if ivt.count == 0 { + return nil + } + f := func(n *IntervalValue) bool { ivs = append(ivs, n); return true } + ivt.Visit(iv, f) + return ivs +} + +type StringComparable string + +func (s StringComparable) Compare(c Comparable) int { + sc := c.(StringComparable) + if s < sc { + return -1 + } + if s > sc { + return 1 + } + return 0 +} + +func NewStringInterval(begin, end string) Interval { + return Interval{StringComparable(begin), StringComparable(end)} +} + +func NewStringPoint(s string) Interval { + return Interval{StringComparable(s), StringComparable(s + "\x00")} +} + +// StringAffineComparable treats "" as > all other strings +type StringAffineComparable string + +func (s StringAffineComparable) Compare(c Comparable) int { + sc := c.(StringAffineComparable) + + if len(s) == 0 { + if len(sc) == 0 { + return 0 + } + return 1 + } + if len(sc) == 0 { + return -1 + } + + if s < sc { + return -1 + } + if s > sc { + return 1 + } + return 0 +} + +func NewStringAffineInterval(begin, end string) Interval { + return Interval{StringAffineComparable(begin), StringAffineComparable(end)} +} +func NewStringAffinePoint(s string) Interval { + return NewStringAffineInterval(s, s+"\x00") +} + +func NewInt64Interval(a int64, b int64) Interval { + return Interval{Int64Comparable(a), Int64Comparable(b)} +} + +func NewInt64Point(a int64) Interval { + return Interval{Int64Comparable(a), Int64Comparable(a + 1)} +} + +type Int64Comparable int64 + +func (v Int64Comparable) Compare(c Comparable) int { + vc := c.(Int64Comparable) + cmp := v - vc + if cmp < 0 { + return -1 + } + if cmp > 0 { + return 1 + } + return 0 +} + +// BytesAffineComparable treats empty byte arrays as > all other byte arrays +type BytesAffineComparable []byte + +func (b BytesAffineComparable) Compare(c Comparable) int { + bc := 
c.(BytesAffineComparable) + + if len(b) == 0 { + if len(bc) == 0 { + return 0 + } + return 1 + } + if len(bc) == 0 { + return -1 + } + + return bytes.Compare(b, bc) +} + +func NewBytesAffineInterval(begin, end []byte) Interval { + return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)} +} +func NewBytesAffinePoint(b []byte) Interval { + be := make([]byte, len(b)+1) + copy(be, b) + be[len(b)] = 0 + return NewBytesAffineInterval(b, be) +} diff --git a/vendor/github.com/coreos/etcd/pkg/contention/BUILD b/vendor/github.com/coreos/etcd/pkg/contention/BUILD new file mode 100644 index 000000000000..9ad78d8e5582 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/contention/BUILD @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "contention.go", + "doc.go", + ], + importpath = "github.com/coreos/etcd/pkg/contention", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/contention/contention.go b/vendor/github.com/coreos/etcd/pkg/contention/contention.go new file mode 100644 index 000000000000..26ce9a2f3473 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/contention/contention.go @@ -0,0 +1,69 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package contention + +import ( + "sync" + "time" +) + +// TimeoutDetector detects routine starvations by +// observing the actual time duration to finish an action +// or between two events that should happen in a fixed +// interval. If the observed duration is longer than +// the expectation, the detector will report the result. +type TimeoutDetector struct { + mu sync.Mutex // protects all + maxDuration time.Duration + // map from event to time + // time is the last seen time of the event. + records map[uint64]time.Time +} + +// NewTimeoutDetector creates the TimeoutDetector. +func NewTimeoutDetector(maxDuration time.Duration) *TimeoutDetector { + return &TimeoutDetector{ + maxDuration: maxDuration, + records: make(map[uint64]time.Time), + } +} + +// Reset resets the NewTimeoutDetector. +func (td *TimeoutDetector) Reset() { + td.mu.Lock() + defer td.mu.Unlock() + + td.records = make(map[uint64]time.Time) +} + +// Observe observes an event for given id. It returns false and exceeded duration +// if the interval is longer than the expectation. 
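Putting the interval tree above together, a sketch of the stabbing query the mvcc watcher group relies on: Stab returns every stored interval containing the query point (interval names and values are illustrative):

package main

import (
    "fmt"

    "github.com/coreos/etcd/pkg/adt"
)

func main() {
    var ivt adt.IntervalTree
    ivt.Insert(adt.NewStringAffineInterval("foo", "fop"), "prefix foo/")
    ivt.Insert(adt.NewStringAffineInterval("a", "z"), "wide range")

    // "foo1" lies in both ["foo","fop") and ["a","z"), so both print.
    for _, iv := range ivt.Stab(adt.NewStringAffinePoint("foo1")) {
        fmt.Println(iv.Val)
    }
}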
+func (td *TimeoutDetector) Observe(which uint64) (bool, time.Duration) { + td.mu.Lock() + defer td.mu.Unlock() + + ok := true + now := time.Now() + exceed := time.Duration(0) + + if pt, found := td.records[which]; found { + exceed = now.Sub(pt) - td.maxDuration + if exceed > 0 { + ok = false + } + } + td.records[which] = now + return ok, exceed +} diff --git a/vendor/github.com/coreos/etcd/pkg/contention/doc.go b/vendor/github.com/coreos/etcd/pkg/contention/doc.go new file mode 100644 index 000000000000..daf452219e07 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/contention/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package contention provides facilities for detecting system contention. +package contention diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/BUILD b/vendor/github.com/coreos/etcd/pkg/cpuutil/BUILD new file mode 100644 index 000000000000..b19ad1c9c91e --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/cpuutil/BUILD @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "endian.go", + ], + importpath = "github.com/coreos/etcd/pkg/cpuutil", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go b/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go new file mode 100644 index 000000000000..0323b2d34c64 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cpuutil provides facilities for detecting cpu-specific features. 
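Using the TimeoutDetector above takes two calls: the first Observe for an id only records a timestamp; later ones report whether, and by how much, the gap between observations exceeded the expected interval. A hedged sketch (the id and durations are chosen for illustration):

package main

import (
    "fmt"
    "time"

    "github.com/coreos/etcd/pkg/contention"
)

func main() {
    // expect an observation for each id at least every 10ms
    td := contention.NewTimeoutDetector(10 * time.Millisecond)

    td.Observe(1) // first observation only records the time
    time.Sleep(25 * time.Millisecond)

    ok, exceeded := td.Observe(1)
    fmt.Println(ok, exceeded) // false, roughly 15ms: the loop is starved
}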
+package cpuutil diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go b/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go new file mode 100644 index 000000000000..6ab898d4b596 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go @@ -0,0 +1,36 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cpuutil + +import ( + "encoding/binary" + "unsafe" +) + +const intWidth int = int(unsafe.Sizeof(0)) + +var byteOrder binary.ByteOrder + +// ByteOrder returns the byte order for the CPU's native endianness. +func ByteOrder() binary.ByteOrder { return byteOrder } + +func init() { + var i int = 0x1 + if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 { + byteOrder = binary.BigEndian + } else { + byteOrder = binary.LittleEndian + } +} diff --git a/vendor/github.com/coreos/etcd/pkg/crc/BUILD b/vendor/github.com/coreos/etcd/pkg/crc/BUILD new file mode 100644 index 000000000000..7584deaf53de --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/crc/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["crc.go"], + importpath = "github.com/coreos/etcd/pkg/crc", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/crc/crc.go b/vendor/github.com/coreos/etcd/pkg/crc/crc.go new file mode 100644 index 000000000000..4b998a48455f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/crc/crc.go @@ -0,0 +1,43 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package crc provides utility function for cyclic redundancy check +// algorithms. +package crc + +import ( + "hash" + "hash/crc32" +) + +// The size of a CRC-32 checksum in bytes. +const Size = 4 + +type digest struct { + crc uint32 + tab *crc32.Table +} + +// New creates a new hash.Hash32 computing the CRC-32 checksum +// using the polynomial represented by the Table. +// Modified by xiangli to take a prevcrc. 
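The init in endian.go above probes native byte order by viewing the int 0x1 through an unsafe pointer: a leading zero byte means the most significant byte comes first (big-endian), a leading 1 means little-endian. Consumers only ever see the cached result:

package main

import (
    "fmt"

    "github.com/coreos/etcd/pkg/cpuutil"
)

func main() {
    // On amd64 or arm64 this prints LittleEndian.
    fmt.Println(cpuutil.ByteOrder())
}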
+func New(prev uint32, tab *crc32.Table) hash.Hash32 { return &digest{prev, tab} } + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return 1 } + +func (d *digest) Reset() { d.crc = 0 } + +func (d *digest) Write(p []byte) (n int, err error) { + d.crc = crc32.Update(d.crc, d.tab, p) + return len(p), nil +} + +func (d *digest) Sum32() uint32 { return d.crc } + +func (d *digest) Sum(in []byte) []byte { + s := d.Sum32() + return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/BUILD b/vendor/github.com/coreos/etcd/pkg/fileutil/BUILD new file mode 100644 index 000000000000..058eb06def97 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/BUILD @@ -0,0 +1,106 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "fileutil.go", + "lock.go", + "preallocate.go", + "purge.go", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_darwin.go", + "sync_darwin.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "dir_unix.go", + "lock_flock.go", + "lock_linux.go", + "preallocate_unix.go", + "sync_linux.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "dir_unix.go", + "lock_flock.go", + "lock_unix.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "dir_unix.go", + "lock_plan9.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "dir_unix.go", + "lock_solaris.go", + "preallocate_unsupported.go", + "sync.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "dir_windows.go", + "lock_windows.go", + "preallocate_unsupported.go", + "sync.go", + ], + "//conditions:default": [], + }), + importpath = "github.com/coreos/etcd/pkg/fileutil", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go new file mode 100644 index 000000000000..58a77dfc1a99 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go @@ -0,0 +1,22 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with 
the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package fileutil + +import "os" + +// OpenDir opens a directory for syncing. +func OpenDir(path string) (*os.File, error) { return os.Open(path) } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go new file mode 100644 index 000000000000..c123395c0040 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go @@ -0,0 +1,46 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package fileutil + +import ( + "os" + "syscall" +) + +// OpenDir opens a directory in windows with write access for syncing. +func OpenDir(path string) (*os.File, error) { + fd, err := openDir(path) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func openDir(path string) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE) + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) + createmode := uint32(syscall.OPEN_EXISTING) + fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) + return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go new file mode 100644 index 000000000000..fce5126c6956 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go @@ -0,0 +1,122 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fileutil implements utility functions related to files and paths. 
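The crc package above differs from hash/crc32 only in that New seeds the digest with a previous checksum, so a chain of records can each be checksummed relative to the last (as etcd's WAL does). A sketch of the resumed-equals-one-shot property:

package main

import (
    "fmt"
    "hash/crc32"

    "github.com/coreos/etcd/pkg/crc"
)

func main() {
    tab := crc32.MakeTable(crc32.Castagnoli)

    // checksum of the whole input in one shot
    whole := crc32.Checksum([]byte("hello world"), tab)

    // resume from the checksum of the first chunk, then add the rest
    h := crc.New(crc32.Checksum([]byte("hello "), tab), tab)
    h.Write([]byte("world"))

    fmt.Println(h.Sum32() == whole) // true
}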
+
+package fileutil
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+
+ "github.com/coreos/pkg/capnslog"
+)
+
+const (
+ // PrivateFileMode grants the owner read/write access to a file.
+ PrivateFileMode = 0600
+ // PrivateDirMode grants the owner permission to make/remove files inside the directory.
+ PrivateDirMode = 0700
+)
+
+var (
+ plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil")
+)
+
+// IsDirWriteable checks if dir is writable by writing and removing a file
+// in dir. It returns nil if dir is writable.
+func IsDirWriteable(dir string) error {
+ f := filepath.Join(dir, ".touch")
+ if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
+ return err
+ }
+ return os.Remove(f)
+}
+
+// ReadDir returns the filenames in the given directory in sorted order.
+func ReadDir(dirpath string) ([]string, error) {
+ dir, err := os.Open(dirpath)
+ if err != nil {
+ return nil, err
+ }
+ defer dir.Close()
+ names, err := dir.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
+// does not exist. TouchDirAll also ensures the given directory is writable.
+func TouchDirAll(dir string) error {
+ // If path is already a directory, MkdirAll does nothing
+ // and returns nil.
+ err := os.MkdirAll(dir, PrivateDirMode)
+ if err != nil {
+ // if mkdirAll("a/text") and "text" is not
+ // a directory, this will return syscall.ENOTDIR
+ return err
+ }
+ return IsDirWriteable(dir)
+}
+
+// CreateDirAll is similar to TouchDirAll but returns an error
+// if the deepest directory is not empty.
+func CreateDirAll(dir string) error {
+ err := TouchDirAll(dir)
+ if err == nil {
+ var ns []string
+ ns, err = ReadDir(dir)
+ if err != nil {
+ return err
+ }
+ if len(ns) != 0 {
+ err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
+ }
+ }
+ return err
+}
+
+func Exist(name string) bool {
+ _, err := os.Stat(name)
+ return err == nil
+}
+
+// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily
+// shorten the length of the file.
+func ZeroToEnd(f *os.File) error {
+ // TODO: support FALLOC_FL_ZERO_RANGE
+ off, err := f.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ lenf, lerr := f.Seek(0, io.SeekEnd)
+ if lerr != nil {
+ return lerr
+ }
+ if err = f.Truncate(off); err != nil {
+ return err
+ }
+ // make sure blocks remain allocated
+ if err = Preallocate(f, lenf, true); err != nil {
+ return err
+ }
+ _, err = f.Seek(off, io.SeekStart)
+ return err
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go
new file mode 100644
index 000000000000..338627f43c88
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go
@@ -0,0 +1,26 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
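A short sketch of the directory helpers above (the paths are illustrative): TouchDirAll creates the path with 0700 and verifies writability, while CreateDirAll additionally requires the deepest directory to be empty, which suits a fresh data dir.

package main

import (
    "fmt"

    "github.com/coreos/etcd/pkg/fileutil"
)

func main() {
    if err := fileutil.TouchDirAll("/tmp/demo-data"); err != nil {
        fmt.Println("not usable:", err)
        return
    }
    // fails if /tmp/demo-data/wal already has entries
    if err := fileutil.CreateDirAll("/tmp/demo-data/wal"); err != nil {
        fmt.Println("wal dir not empty:", err)
    }
}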
+ +package fileutil + +import ( + "errors" + "os" +) + +var ( + ErrLocked = errors.New("fileutil: file already locked") +) + +type LockedFile struct{ *os.File } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go new file mode 100644 index 000000000000..542550bc8a96 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows,!plan9,!solaris + +package fileutil + +import ( + "os" + "syscall" +) + +func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { + f.Close() + if err == syscall.EWOULDBLOCK { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go new file mode 100644 index 000000000000..939fea623818 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go @@ -0,0 +1,97 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "io" + "os" + "syscall" +) + +// This used to call syscall.Flock() but that call fails with EBADF on NFS. +// An alternative is lockf() which works on NFS but that call lets a process lock +// the same file twice. Instead, use Linux's non-standard open file descriptor +// locks which will block if the process already holds the file lock. 
+// +// constants from /usr/include/bits/fcntl-linux.h +const ( + F_OFD_GETLK = 37 + F_OFD_SETLK = 37 + F_OFD_SETLKW = 38 +) + +var ( + wrlck = syscall.Flock_t{ + Type: syscall.F_WRLCK, + Whence: int16(io.SeekStart), + Start: 0, + Len: 0, + } + + linuxTryLockFile = flockTryLockFile + linuxLockFile = flockLockFile +) + +func init() { + // use open file descriptor locks if the system supports it + getlk := syscall.Flock_t{Type: syscall.F_RDLCK} + if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil { + linuxTryLockFile = ofdTryLockFile + linuxLockFile = ofdLockFile + } +} + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return linuxTryLockFile(path, flag, perm) +} + +func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + + flock := wrlck + if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil { + f.Close() + if err == syscall.EWOULDBLOCK { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return linuxLockFile(path, flag, perm) +} + +func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + + flock := wrlck + err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock) + + if err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go new file mode 100644 index 000000000000..fee6a7c8f466 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go @@ -0,0 +1,45 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fileutil + +import ( + "os" + "syscall" + "time" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil { + return nil, err + } + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, ErrLocked + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil { + return nil, err + } + for { + f, err := os.OpenFile(path, flag, perm) + if err == nil { + return &LockedFile{f}, nil + } + time.Sleep(10 * time.Millisecond) + } +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go new file mode 100644 index 000000000000..352ca5590d13 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go @@ -0,0 +1,62 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build solaris + +package fileutil + +import ( + "os" + "syscall" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Type = syscall.F_WRLCK + lock.Whence = 0 + lock.Pid = 0 + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil { + f.Close() + if err == syscall.EAGAIN { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Type = syscall.F_WRLCK + lock.Whence = 0 + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go new file mode 100644 index 000000000000..ed01164de6e4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go @@ -0,0 +1,29 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
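Every platform variant above exposes the same two entry points: TryLockFile (non-blocking) and LockFile (blocking). A sketch of the non-blocking form with a hypothetical lock path; closing the returned LockedFile releases the lock:

package main

import (
	"log"
	"os"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	l, err := fileutil.TryLockFile("/tmp/demo.lock", os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
	if err == fileutil.ErrLocked {
		log.Fatal("another process holds the lock")
	} else if err != nil {
		log.Fatal(err)
	}
	defer l.Close() // closing the file releases the lock
	// ... exclusive access to the guarded resource ...
}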
+ +// +build !windows,!plan9,!solaris,!linux + +package fileutil + +import ( + "os" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockTryLockFile(path, flag, perm) +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockLockFile(path, flag, perm) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go new file mode 100644 index 000000000000..8698f4a8d117 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go @@ -0,0 +1,125 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package fileutil + +import ( + "errors" + "fmt" + "os" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + + errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.") +) + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + LOCKFILE_EXCLUSIVE_LOCK = 2 + LOCKFILE_FAIL_IMMEDIATELY = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func open(path string, flag int, perm os.FileMode) (*os.File, error) { + if path == "" { + return nil, fmt.Errorf("cannot open empty filename") + } + var access uint32 + switch flag { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + case syscall.O_WRONLY | syscall.O_CREAT: + access = syscall.GENERIC_ALL + default: + panic(fmt.Errorf("flag %v is not supported", flag)) + } + fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]), + access, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, + syscall.OPEN_ALWAYS, + syscall.FILE_ATTRIBUTE_NORMAL, + 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func lockFile(fd syscall.Handle, flags uint32) error { + var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK + flag |= flags + if fd == syscall.InvalidHandle { + return nil + } + err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) + if 
err == nil { + return nil + } else if err.Error() == errLocked.Error() { + return ErrLocked + } else if err != errLockViolation { + return err + } + return nil +} + +func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + var reserved uint32 = 0 + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go new file mode 100644 index 000000000000..c747b7cf81f9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go @@ -0,0 +1,54 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "io" + "os" +) + +// Preallocate tries to allocate the space for given +// file. This operation is only supported on linux by a +// few filesystems (btrfs, ext4, etc.). +// If the operation is unsupported, no error will be returned. +// Otherwise, the error encountered will be returned. +func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { + if sizeInBytes == 0 { + // fallocate will return EINVAL if length is 0; skip + return nil + } + if extendFile { + return preallocExtend(f, sizeInBytes) + } + return preallocFixed(f, sizeInBytes) +} + +func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { + curOff, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + size, err := f.Seek(sizeInBytes, io.SeekEnd) + if err != nil { + return err + } + if _, err = f.Seek(curOff, io.SeekStart); err != nil { + return err + } + if sizeInBytes > size { + return nil + } + return f.Truncate(sizeInBytes) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go new file mode 100644 index 000000000000..1ed09c560f2a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go @@ -0,0 +1,43 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
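A sketch of how Preallocate is typically called (the file name and size are hypothetical). The extendFile flag selects between growing the file to sizeInBytes and merely reserving blocks while keeping the current size:

package main

import (
	"log"
	"os"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	f, err := os.OpenFile("/tmp/segment.tmp", os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	// reserve and extend to 64 MiB up front so later appends cannot
	// fail with ENOSPC mid-write
	if err := fileutil.Preallocate(f, 64*1024*1024, true); err != nil {
		log.Fatal(err)
	}
}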
+ +// +build darwin + +package fileutil + +import ( + "os" + "syscall" + "unsafe" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + if err := preallocFixed(f, sizeInBytes); err != nil { + return err + } + return preallocExtendTrunc(f, sizeInBytes) +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + fstore := &syscall.Fstore_t{ + Flags: syscall.F_ALLOCATEALL, + Posmode: syscall.F_PEOFPOSMODE, + Length: sizeInBytes} + p := unsafe.Pointer(fstore) + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p)) + if errno == 0 || errno == syscall.ENOTSUP { + return nil + } + return errno +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go new file mode 100644 index 000000000000..50bd84f02ada --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + // use mode = 0 to change size + err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // not supported; fallback + // fallocate EINTRs frequently in some environments; fallback + if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { + return preallocExtendTrunc(f, sizeInBytes) + } + } + return err +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE + err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // treat not supported as nil error + if ok && errno == syscall.ENOTSUP { + return nil + } + } + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go new file mode 100644 index 000000000000..162fbc5f7826 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go @@ -0,0 +1,25 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !linux,!darwin + +package fileutil + +import "os" + +func preallocExtend(f *os.File, sizeInBytes int64) error { + return preallocExtendTrunc(f, sizeInBytes) +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { return nil } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go new file mode 100644 index 000000000000..92fceab017f3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go @@ -0,0 +1,78 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "os" + "path/filepath" + "sort" + "strings" + "time" +) + +func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { + return purgeFile(dirname, suffix, max, interval, stop, nil) +} + +// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. +func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error { + errC := make(chan error, 1) + go func() { + for { + fnames, err := ReadDir(dirname) + if err != nil { + errC <- err + return + } + newfnames := make([]string, 0) + for _, fname := range fnames { + if strings.HasSuffix(fname, suffix) { + newfnames = append(newfnames, fname) + } + } + sort.Strings(newfnames) + fnames = newfnames + for len(newfnames) > int(max) { + f := filepath.Join(dirname, newfnames[0]) + l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) + if err != nil { + break + } + if err = os.Remove(f); err != nil { + errC <- err + return + } + if err = l.Close(); err != nil { + plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) + errC <- err + return + } + plog.Infof("purged file %s successfully", f) + newfnames = newfnames[1:] + } + if purgec != nil { + for i := 0; i < len(fnames)-len(newfnames); i++ { + purgec <- fnames[i] + } + } + select { + case <-time.After(interval): + case <-stop: + return + } + } + }() + return errC +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go new file mode 100644 index 000000000000..54dd41f4f351 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go @@ -0,0 +1,29 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
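A sketch of driving PurgeFile above (the directory, suffix, retention count, and interval are hypothetical): the purge loop keeps at most five files whose names end in "wal", rescans every 30 seconds, and reports the first error on the returned channel before exiting.

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	stop := make(chan struct{})
	errc := fileutil.PurgeFile("/var/lib/etcd/wal", "wal", 5, 30*time.Second, stop)
	go func() {
		if err := <-errc; err != nil {
			log.Printf("purge loop failed: %v", err)
		}
	}()
	// ... do work; closing stop on shutdown ends the purge loop ...
	close(stop)
}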
+ +// +build !linux,!darwin + +package fileutil + +import "os" + +// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform. +func Fsync(f *os.File) error { + return f.Sync() +} + +// Fdatasync is a wrapper around file.Sync(). Special handling is needed on the linux platform. +func Fdatasync(f *os.File) error { + return f.Sync() +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go new file mode 100644 index 000000000000..c2f39bf204d2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go @@ -0,0 +1,40 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin + +package fileutil + +import ( + "os" + "syscall" +) + +// Fsync on HFS/OSX flushes the data onto the physical drive, but the drive +// may not write it to the persistent media for quite some time, and it may be +// written in an out-of-order sequence. Using F_FULLFSYNC ensures that the +// physical drive's buffer will also get flushed to the media. +func Fsync(f *os.File) error { + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0)) + if errno == 0 { + return nil + } + return errno +} + +// Fdatasync on the darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence +// on physical drive media. +func Fdatasync(f *os.File) error { + return Fsync(f) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go new file mode 100644 index 000000000000..1bbced915e9b --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go @@ -0,0 +1,34 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform. +func Fsync(f *os.File) error { + return f.Sync() +} + +// Fdatasync is similar to fsync(), but does not flush modified metadata +// unless that metadata is needed in order to allow a subsequent data retrieval +// to be correctly handled.
+func Fdatasync(f *os.File) error { + return syscall.Fdatasync(int(f.Fd())) +} diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/BUILD b/vendor/github.com/coreos/etcd/pkg/httputil/BUILD new file mode 100644 index 000000000000..53f5772ce54d --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/httputil/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["httputil.go"], + importpath = "github.com/coreos/etcd/pkg/httputil", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go new file mode 100644 index 000000000000..09f44e7c71d9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// borrowed from golang/net/context/ctxhttp/cancelreq.go + +// Package httputil provides HTTP utility functions. +package httputil + +import ( + "io" + "io/ioutil" + "net/http" +) + +// GracefulClose drains http.Response.Body until it hits EOF +// and closes it. This prevents the underlying TCP/TLS connection from +// being closed prematurely, keeping it available for reuse. +func GracefulClose(resp *http.Response) { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() +} diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/BUILD b/vendor/github.com/coreos/etcd/pkg/idutil/BUILD new file mode 100644 index 000000000000..b981552f4190 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/idutil/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["id.go"], + importpath = "github.com/coreos/etcd/pkg/idutil", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/github.com/coreos/etcd/pkg/idutil/id.go new file mode 100644 index 000000000000..2da210626571 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/idutil/id.go @@ -0,0 +1,78 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package idutil implements utility functions for generating unique, +// randomized ids.
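The intended call pattern for httputil.GracefulClose above (the endpoint is hypothetical): draining the body before closing lets net/http return the keep-alive connection to its pool.

package main

import (
	"log"
	"net/http"

	"github.com/coreos/etcd/pkg/httputil"
)

func main() {
	resp, err := http.Get("http://example.com/health") // hypothetical endpoint
	if err != nil {
		log.Fatal(err)
	}
	log.Println(resp.Status)
	// drain and close the body so the connection can be reused
	httputil.GracefulClose(resp)
}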
+package idutil + +import ( + "math" + "sync" + "time" +) + +const ( + tsLen = 5 * 8 + cntLen = 8 + suffixLen = tsLen + cntLen +) + +// Generator generates unique identifiers based on counters, timestamps, and +// a node member ID. +// +// The initial id is in this format: +// High order 2 bytes are from memberID, next 5 bytes are from timestamp, +// and the low order byte is a counter. +// | prefix | suffix | +// | 2 bytes | 5 bytes | 1 byte | +// | memberID | timestamp | cnt | +// +// The 5 timestamp bytes differ across machine restarts, provided the +// restart happens more than 1 ms and less than 35 years later. +// +// The suffix is incremented to generate the next id. +// The count field may overflow into the timestamp field, which is intentional: +// it extends the event window to 2^56. Ids generated after a restart are +// still unique because etcd throughput is well below +// 256 requests/ms (250k requests/second). +type Generator struct { + mu sync.Mutex + // high order 2 bytes + prefix uint64 + // low order 6 bytes + suffix uint64 +} + +func NewGenerator(memberID uint16, now time.Time) *Generator { + prefix := uint64(memberID) << suffixLen + unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond) + suffix := lowbit(unixMilli, tsLen) << cntLen + return &Generator{ + prefix: prefix, + suffix: suffix, + } +} + +// Next generates an id that is unique. +func (g *Generator) Next() uint64 { + g.mu.Lock() + defer g.mu.Unlock() + g.suffix++ + id := g.prefix | lowbit(g.suffix, suffixLen) + return id +} + +func lowbit(x uint64, n uint) uint64 { + return x & (math.MaxUint64 >> (64 - n)) +} diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/BUILD b/vendor/github.com/coreos/etcd/pkg/ioutil/BUILD new file mode 100644 index 000000000000..069edd45f464 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/ioutil/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "pagewriter.go", + "readcloser.go", + "reader.go", + "util.go", + ], + importpath = "github.com/coreos/etcd/pkg/ioutil", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go b/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go new file mode 100644 index 000000000000..72de1593d3ad --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go @@ -0,0 +1,106 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
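A sketch of the id layout in practice (the member id is an arbitrary example value): consecutive ids share the 2-byte member prefix and differ by one in the low-order suffix.

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/idutil"
)

func main() {
	g := idutil.NewGenerator(0x12ab, time.Now())
	id1, id2 := g.Next(), g.Next()
	fmt.Printf("%016x\n%016x\n", id1, id2) // both start with 12ab; the second is the first plus one
}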
+ +package ioutil + +import ( + "io" +) + +var defaultBufferBytes = 128 * 1024 + +// PageWriter implements the io.Writer interface so that writes will +// either be in page chunks or from flushing. +type PageWriter struct { + w io.Writer + // pageOffset tracks the page offset of the base of the buffer + pageOffset int + // pageBytes is the number of bytes per page + pageBytes int + // bufferedBytes counts the number of bytes pending for write in the buffer + bufferedBytes int + // buf holds the write buffer + buf []byte + // bufWatermarkBytes is the number of bytes the buffer can hold before it needs + // to be flushed. It is less than len(buf) so there is space for slack writes + // to bring the writer to page alignment. + bufWatermarkBytes int +} + +// NewPageWriter creates a new PageWriter. pageBytes is the number of bytes +// to write per page. pageOffset is the starting offset of io.Writer. +func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter { + return &PageWriter{ + w: w, + pageOffset: pageOffset, + pageBytes: pageBytes, + buf: make([]byte, defaultBufferBytes+pageBytes), + bufWatermarkBytes: defaultBufferBytes, + } +} + +func (pw *PageWriter) Write(p []byte) (n int, err error) { + if len(p)+pw.bufferedBytes <= pw.bufWatermarkBytes { + // no overflow + copy(pw.buf[pw.bufferedBytes:], p) + pw.bufferedBytes += len(p) + return len(p), nil + } + // complete the slack page in the buffer if unaligned + slack := pw.pageBytes - ((pw.pageOffset + pw.bufferedBytes) % pw.pageBytes) + if slack != pw.pageBytes { + partial := slack > len(p) + if partial { + // not enough data to complete the slack page + slack = len(p) + } + // special case: writing to slack page in buffer + copy(pw.buf[pw.bufferedBytes:], p[:slack]) + pw.bufferedBytes += slack + n = slack + p = p[slack:] + if partial { + // avoid forcing an unaligned flush + return n, nil + } + } + // buffer contents are now page-aligned; clear out + if err = pw.Flush(); err != nil { + return n, err + } + // directly write all complete pages without copying + if len(p) > pw.pageBytes { + pages := len(p) / pw.pageBytes + c, werr := pw.w.Write(p[:pages*pw.pageBytes]) + n += c + if werr != nil { + return n, werr + } + p = p[pages*pw.pageBytes:] + } + // write remaining tail to buffer + c, werr := pw.Write(p) + n += c + return n, werr +} + +func (pw *PageWriter) Flush() error { + if pw.bufferedBytes == 0 { + return nil + } + _, err := pw.w.Write(pw.buf[:pw.bufferedBytes]) + pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes + pw.bufferedBytes = 0 + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go b/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go new file mode 100644 index 000000000000..d3efcfe3d5a6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go @@ -0,0 +1,66 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
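A sketch of PageWriter's buffering behavior (the page size is chosen arbitrarily): writes smaller than the watermark stay in the buffer, so nothing reaches the underlying writer until Flush or a page-aligned overflow.

package main

import (
	"bytes"
	"fmt"
	"log"

	etcdioutil "github.com/coreos/etcd/pkg/ioutil"
)

func main() {
	var buf bytes.Buffer
	pw := etcdioutil.NewPageWriter(&buf, 4096, 0) // 4 KiB pages, offset 0
	if _, err := pw.Write(make([]byte, 100)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.Len()) // 0: the write is still buffered
	if err := pw.Flush(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.Len()) // 100
}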
+ +package ioutil + +import ( + "fmt" + "io" +) + +// ReaderAndCloser implements io.ReadCloser interface by combining +// reader and closer together. +type ReaderAndCloser struct { + io.Reader + io.Closer +} + +var ( + ErrShortRead = fmt.Errorf("ioutil: short read") + ErrExpectEOF = fmt.Errorf("ioutil: expect EOF") +) + +// NewExactReadCloser returns a ReadCloser that returns errors if the underlying +// reader does not read back exactly the requested number of bytes. +func NewExactReadCloser(rc io.ReadCloser, totalBytes int64) io.ReadCloser { + return &exactReadCloser{rc: rc, totalBytes: totalBytes} +} + +type exactReadCloser struct { + rc io.ReadCloser + br int64 + totalBytes int64 +} + +func (e *exactReadCloser) Read(p []byte) (int, error) { + n, err := e.rc.Read(p) + e.br += int64(n) + if e.br > e.totalBytes { + return 0, ErrExpectEOF + } + if e.br < e.totalBytes && n == 0 { + return 0, ErrShortRead + } + return n, err +} + +func (e *exactReadCloser) Close() error { + if err := e.rc.Close(); err != nil { + return err + } + if e.br < e.totalBytes { + return ErrShortRead + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go b/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go new file mode 100644 index 000000000000..0703ed476d80 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go @@ -0,0 +1,40 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ioutil implements I/O utility functions. +package ioutil + +import "io" + +// NewLimitedBufferReader returns a reader that reads from the given reader +// but limits the amount of data returned to at most n bytes. +func NewLimitedBufferReader(r io.Reader, n int) io.Reader { + return &limitedBufferReader{ + r: r, + n: n, + } +} + +type limitedBufferReader struct { + r io.Reader + n int +} + +func (r *limitedBufferReader) Read(p []byte) (n int, err error) { + np := p + if len(np) > r.n { + np = np[:r.n] + } + return r.r.Read(np) +} diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/util.go b/vendor/github.com/coreos/etcd/pkg/ioutil/util.go new file mode 100644 index 000000000000..192ad888c241 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/ioutil/util.go @@ -0,0 +1,43 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
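A sketch of NewExactReadCloser above: a 3-byte stream declared as 5 bytes surfaces ErrShortRead rather than truncating silently.

package main

import (
	"fmt"
	"io"
	stdioutil "io/ioutil"
	"strings"

	etcdioutil "github.com/coreos/etcd/pkg/ioutil"
)

func main() {
	rc := etcdioutil.NewExactReadCloser(stdioutil.NopCloser(strings.NewReader("abc")), 5)
	if _, err := io.Copy(stdioutil.Discard, rc); err != nil {
		fmt.Println(err) // ioutil: short read
	}
}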
+ +package ioutil + +import ( + "io" + "os" + + "github.com/coreos/etcd/pkg/fileutil" +) + +// WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library, +// but calls Sync before closing the file. WriteAndSyncFile guarantees the data +// is synced if there is no error returned. +func WriteAndSyncFile(filename string, data []byte, perm os.FileMode) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err == nil { + err = fileutil.Fsync(f) + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/BUILD b/vendor/github.com/coreos/etcd/pkg/logutil/BUILD new file mode 100644 index 000000000000..d91a7bdf426f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/logutil/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["merge_logger.go"], + importpath = "github.com/coreos/etcd/pkg/logutil", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/coreos/pkg/capnslog:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go new file mode 100644 index 000000000000..cc750f4d3d63 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go @@ -0,0 +1,195 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package logutil includes utilities to facilitate logging. +package logutil + +import ( + "fmt" + "sync" + "time" + + "github.com/coreos/pkg/capnslog" +) + +var ( + defaultMergePeriod = time.Second + defaultTimeOutputScale = 10 * time.Millisecond + + outputInterval = time.Second +) + +// line represents a log line that can be printed out +// through capnslog.PackageLogger. +type line struct { + level capnslog.LogLevel + str string +} + +func (l line) append(s string) line { + return line{ + level: l.level, + str: l.str + " " + s, + } +} + +// status represents the merge status of a line. 
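A sketch of WriteAndSyncFile from util.go above (the path and contents are hypothetical): the call shape matches ioutil.WriteFile, but the data is fsynced before the file is closed.

package main

import (
	"log"

	etcdioutil "github.com/coreos/etcd/pkg/ioutil"
)

func main() {
	// hypothetical metadata file that must survive a crash
	if err := etcdioutil.WriteAndSyncFile("/tmp/member-id", []byte("example\n"), 0600); err != nil {
		log.Fatal(err)
	}
}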
+type status struct { + period time.Duration + + start time.Time // start time of latest merge period + count int // number of merged lines from starting +} + +func (s *status) isInMergePeriod(now time.Time) bool { + return s.period == 0 || s.start.Add(s.period).After(now) +} + +func (s *status) isEmpty() bool { return s.count == 0 } + +func (s *status) summary(now time.Time) string { + ts := s.start.Round(defaultTimeOutputScale) + took := now.Round(defaultTimeOutputScale).Sub(ts) + return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took) +} + +func (s *status) reset(now time.Time) { + s.start = now + s.count = 0 +} + +// MergeLogger supports merge logging, which merges repeated log lines +// and prints summary log lines instead. +// +// For merge logging, MergeLogger prints out the line when the line appears +// at the first time. MergeLogger holds the same log line printed within +// defaultMergePeriod, and prints out summary log line at the end of defaultMergePeriod. +// It stops merging when the line doesn't appear within the +// defaultMergePeriod. +type MergeLogger struct { + *capnslog.PackageLogger + + mu sync.Mutex // protect statusm + statusm map[line]*status +} + +func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger { + l := &MergeLogger{ + PackageLogger: logger, + statusm: make(map[line]*status), + } + go l.outputLoop() + return l +} + +func (l *MergeLogger) MergeInfo(entries ...interface{}) { + l.merge(line{ + level: capnslog.INFO, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeInfof(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.INFO, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeNotice(entries ...interface{}) { + l.merge(line{ + level: capnslog.NOTICE, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.NOTICE, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeWarning(entries ...interface{}) { + l.merge(line{ + level: capnslog.WARNING, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.WARNING, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeError(entries ...interface{}) { + l.merge(line{ + level: capnslog.ERROR, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.ERROR, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) merge(ln line) { + l.mu.Lock() + + // increase count if the logger is merging the line + if status, ok := l.statusm[ln]; ok { + status.count++ + l.mu.Unlock() + return + } + + // initialize status of the line + l.statusm[ln] = &status{ + period: defaultMergePeriod, + start: time.Now(), + } + // release the lock before IO operation + l.mu.Unlock() + // print out the line at its first time + l.PackageLogger.Logf(ln.level, ln.str) +} + +func (l *MergeLogger) outputLoop() { + for now := range time.Tick(outputInterval) { + var outputs []line + + l.mu.Lock() + for ln, status := range l.statusm { + if status.isInMergePeriod(now) { + continue + } + if status.isEmpty() { + delete(l.statusm, ln) + continue + } + outputs = append(outputs, ln.append(status.summary(now))) + status.reset(now) + } + l.mu.Unlock() + + for _, o := range outputs { + l.PackageLogger.Logf(o.level, o.str) + } + } +} diff --git 
a/vendor/github.com/coreos/etcd/pkg/monotime/BUILD b/vendor/github.com/coreos/etcd/pkg/monotime/BUILD new file mode 100644 index 000000000000..de04d508cfa9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/monotime/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "issue15006.s", + "monotime.go", + "nanotime.go", + ], + importpath = "github.com/coreos/etcd/pkg/monotime", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/monotime/issue15006.s b/vendor/github.com/coreos/etcd/pkg/monotime/issue15006.s new file mode 100644 index 000000000000..c3132a1f0c8a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/monotime/issue15006.s @@ -0,0 +1,6 @@ +// Copyright (C) 2016 Arista Networks, Inc. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +// This file is intentionally empty. +// It's a workaround for https://github.com/golang/go/issues/15006 \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/pkg/monotime/monotime.go b/vendor/github.com/coreos/etcd/pkg/monotime/monotime.go new file mode 100644 index 000000000000..a5e16ce1d0e7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/monotime/monotime.go @@ -0,0 +1,26 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monotime + +import ( + "time" +) + +// Time represents a point in monotonic time +type Time uint64 + +func (t Time) Add(d time.Duration) Time { + return Time(uint64(t) + uint64(d.Nanoseconds())) +} diff --git a/vendor/github.com/coreos/etcd/pkg/monotime/nanotime.go b/vendor/github.com/coreos/etcd/pkg/monotime/nanotime.go new file mode 100644 index 000000000000..e3fc846ef9e6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/monotime/nanotime.go @@ -0,0 +1,24 @@ +// Copyright (C) 2016 Arista Networks, Inc. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +// Package monotime provides a fast monotonic clock source. +package monotime + +import ( + _ "unsafe" // required to use //go:linkname +) + +//go:noescape +//go:linkname nanotime runtime.nanotime +func nanotime() int64 + +// Now returns the current time in nanoseconds from a monotonic clock. +// The time returned is based on some arbitrary platform-specific point in the +// past. 
The time returned is guaranteed to increase monotonically at a +// constant rate, unlike time.Now() from the Go standard library, which may +// slow down, speed up, jump forward or backward, due to NTP activity or leap +// seconds. +func Now() Time { + return Time(nanotime()) +} diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/BUILD b/vendor/github.com/coreos/etcd/pkg/netutil/BUILD new file mode 100644 index 000000000000..97fa436fcba8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/netutil/BUILD @@ -0,0 +1,79 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "netutil.go", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "isolate_linux.go", + "routes_linux.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "isolate_stub.go", + "routes.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "isolate_stub.go", + "routes.go", + ], + "//conditions:default": [], + }), + importpath = "github.com/coreos/etcd/pkg/netutil", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + ] + select({ + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/coreos/etcd/pkg/cpuutil:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go new file mode 100644 index 000000000000..418580ac48de --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go @@ -0,0 +1,82 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package netutil + +import ( + "fmt" + "os/exec" +) + +// DropPort drops all tcp packets that are received from the given port and sent to the given port. 
+func DropPort(port int) error { + cmdStr := fmt.Sprintf("sudo iptables -A OUTPUT -p tcp --destination-port %d -j DROP", port) + if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil { + return err + } + cmdStr = fmt.Sprintf("sudo iptables -A INPUT -p tcp --destination-port %d -j DROP", port) + _, err := exec.Command("/bin/sh", "-c", cmdStr).Output() + return err +} + +// RecoverPort stops dropping tcp packets at given port. +func RecoverPort(port int) error { + cmdStr := fmt.Sprintf("sudo iptables -D OUTPUT -p tcp --destination-port %d -j DROP", port) + if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil { + return err + } + cmdStr = fmt.Sprintf("sudo iptables -D INPUT -p tcp --destination-port %d -j DROP", port) + _, err := exec.Command("/bin/sh", "-c", cmdStr).Output() + return err +} + +// SetLatency adds latency in millisecond scale with random variations. +func SetLatency(ms, rv int) error { + ifces, err := GetDefaultInterfaces() + if err != nil { + return err + } + + if rv > ms { + rv = 1 + } + for ifce := range ifces { + cmdStr := fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv) + _, err = exec.Command("/bin/sh", "-c", cmdStr).Output() + if err != nil { + // the rule has already been added. Overwrite it. + cmdStr = fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv) + _, err = exec.Command("/bin/sh", "-c", cmdStr).Output() + if err != nil { + return err + } + } + } + return nil +} + +// RemoveLatency resets latency configurations. +func RemoveLatency() error { + ifces, err := GetDefaultInterfaces() + if err != nil { + return err + } + for ifce := range ifces { + _, err = exec.Command("/bin/sh", "-c", fmt.Sprintf("sudo tc qdisc del dev %s root netem", ifce)).Output() + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go new file mode 100644 index 000000000000..7f4c3e67c2ad --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go @@ -0,0 +1,25 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux + +package netutil + +func DropPort(port int) error { return nil } + +func RecoverPort(port int) error { return nil } + +func SetLatency(ms, rv int) error { return nil } + +func RemoveLatency() error { return nil } diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go new file mode 100644 index 000000000000..5e38dc98dbfe --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go @@ -0,0 +1,169 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package netutil implements network-related utility functions.
+package netutil
+
+import (
+	"context"
+	"net"
+	"net/url"
+	"reflect"
+	"sort"
+	"time"
+
+	"github.com/coreos/etcd/pkg/types"
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil")
+
+	// indirection for testing
+	resolveTCPAddr = resolveTCPAddrDefault
+)
+
+const retryInterval = time.Second
+
+// taken from Go's ResolveTCPAddr code, but uses a configurable ctx
+func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, error) {
+	host, port, serr := net.SplitHostPort(addr)
+	if serr != nil {
+		return nil, serr
+	}
+	portnum, perr := net.DefaultResolver.LookupPort(ctx, "tcp", port)
+	if perr != nil {
+		return nil, perr
+	}
+
+	var ips []net.IPAddr
+	if ip := net.ParseIP(host); ip != nil {
+		ips = []net.IPAddr{{IP: ip}}
+	} else {
+		// Try as a DNS name.
+		ipss, err := net.DefaultResolver.LookupIPAddr(ctx, host)
+		if err != nil {
+			return nil, err
+		}
+		ips = ipss
+	}
+	// randomize?
+	ip := ips[0]
+	return &net.TCPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone}, nil
+}
+
+// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
+// resolveTCPAddrs returns a new set of url.URLs, in which all DNS hostnames
+// are resolved.
+func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) {
+	newurls := make([][]url.URL, 0)
+	for _, us := range urls {
+		nus := make([]url.URL, len(us))
+		for i, u := range us {
+			nu, err := url.Parse(u.String())
+			if err != nil {
+				return nil, err
+			}
+			nus[i] = *nu
+		}
+		for i, u := range nus {
+			h, err := resolveURL(ctx, u)
+			if err != nil {
+				return nil, err
+			}
+			if h != "" {
+				nus[i].Host = h
+			}
+		}
+		newurls = append(newurls, nus)
+	}
+	return newurls, nil
+}
+
+func resolveURL(ctx context.Context, u url.URL) (string, error) {
+	for ctx.Err() == nil {
+		host, _, err := net.SplitHostPort(u.Host)
+		if err != nil {
+			plog.Errorf("could not parse url %s during tcp resolving", u.Host)
+			return "", err
+		}
+		if host == "localhost" || net.ParseIP(host) != nil {
+			return "", nil
+		}
+		tcpAddr, err := resolveTCPAddr(ctx, u.Host)
+		if err == nil {
+			plog.Infof("resolving %s to %s", u.Host, tcpAddr.String())
+			return tcpAddr.String(), nil
+		}
+		plog.Warningf("failed resolving host %s (%v); retrying in %v", u.Host, err, retryInterval)
+		select {
+		case <-ctx.Done():
+			plog.Errorf("could not resolve host %s", u.Host)
+			return "", err
+		case <-time.After(retryInterval):
+		}
+	}
+	return "", ctx.Err()
+}
+
+// urlsEqual checks equality of url.URLs between two arrays.
+// The check passes even if one URL uses a hostname and its counterpart uses
+// the corresponding IP address.
+func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) bool { + if len(a) != len(b) { + return false + } + urls, err := resolveTCPAddrs(ctx, [][]url.URL{a, b}) + if err != nil { + return false + } + a, b = urls[0], urls[1] + sort.Sort(types.URLs(a)) + sort.Sort(types.URLs(b)) + for i := range a { + if !reflect.DeepEqual(a[i], b[i]) { + return false + } + } + + return true +} + +func URLStringsEqual(ctx context.Context, a []string, b []string) bool { + if len(a) != len(b) { + return false + } + urlsA := make([]url.URL, 0) + for _, str := range a { + u, err := url.Parse(str) + if err != nil { + return false + } + urlsA = append(urlsA, *u) + } + urlsB := make([]url.URL, 0) + for _, str := range b { + u, err := url.Parse(str) + if err != nil { + return false + } + urlsB = append(urlsB, *u) + } + + return urlsEqual(ctx, urlsA, urlsB) +} + +func IsNetworkTimeoutError(err error) bool { + nerr, ok := err.(net.Error) + return ok && nerr.Timeout() +} diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/routes.go b/vendor/github.com/coreos/etcd/pkg/netutil/routes.go new file mode 100644 index 000000000000..3eb6a19ec844 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/netutil/routes.go @@ -0,0 +1,33 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux + +package netutil + +import ( + "fmt" + "runtime" +) + +// GetDefaultHost fetches the a resolvable name that corresponds +// to the machine's default routable interface +func GetDefaultHost() (string, error) { + return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH) +} + +// GetDefaultInterfaces fetches the device name of default routable interface. +func GetDefaultInterfaces() (map[string]uint8, error) { + return nil, fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go b/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go new file mode 100644 index 000000000000..5d234d460380 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go @@ -0,0 +1,250 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
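For context, the comparison helpers above make endpoint-list equality robust to DNS: URLs are resolved before comparison, so a hostname form and its IP form compare equal. A minimal usage sketch (the endpoint values below are illustrative, not taken from this diff):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/netutil"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Equal if etcd-0.example.com resolves to 10.0.0.10 (hypothetical values).
	a := []string{"https://etcd-0.example.com:2380"}
	b := []string{"https://10.0.0.10:2380"}
	fmt.Println(netutil.URLStringsEqual(ctx, a, b))
}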
+ +// +build linux + +package netutil + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + "sort" + "syscall" + + "github.com/coreos/etcd/pkg/cpuutil" +) + +var errNoDefaultRoute = fmt.Errorf("could not find default route") +var errNoDefaultHost = fmt.Errorf("could not find default host") +var errNoDefaultInterface = fmt.Errorf("could not find default interface") + +// GetDefaultHost obtains the first IP address of machine from the routing table and returns the IP address as string. +// An IPv4 address is preferred to an IPv6 address for backward compatibility. +func GetDefaultHost() (string, error) { + rmsgs, rerr := getDefaultRoutes() + if rerr != nil { + return "", rerr + } + + // prioritize IPv4 + if rmsg, ok := rmsgs[syscall.AF_INET]; ok { + if host, err := chooseHost(syscall.AF_INET, rmsg); host != "" || err != nil { + return host, err + } + delete(rmsgs, syscall.AF_INET) + } + + // sort so choice is deterministic + var families []int + for family := range rmsgs { + families = append(families, int(family)) + } + sort.Ints(families) + + for _, f := range families { + family := uint8(f) + if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil { + return host, err + } + } + + return "", errNoDefaultHost +} + +func chooseHost(family uint8, rmsg *syscall.NetlinkMessage) (string, error) { + host, oif, err := parsePREFSRC(rmsg) + if host != "" || err != nil { + return host, err + } + + // prefsrc not detected, fall back to getting address from iface + ifmsg, ierr := getIfaceAddr(oif, family) + if ierr != nil { + return "", ierr + } + + attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg) + if aerr != nil { + return "", aerr + } + + for _, attr := range attrs { + // search for RTA_DST because ipv6 doesn't have RTA_SRC + if attr.Attr.Type == syscall.RTA_DST { + return net.IP(attr.Value).String(), nil + } + } + + return "", nil +} + +func getDefaultRoutes() (map[uint8]*syscall.NetlinkMessage, error) { + dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC) + if err != nil { + return nil, err + } + + msgs, msgErr := syscall.ParseNetlinkMessage(dat) + if msgErr != nil { + return nil, msgErr + } + + routes := make(map[uint8]*syscall.NetlinkMessage) + rtmsg := syscall.RtMsg{} + for _, m := range msgs { + if m.Header.Type != syscall.RTM_NEWROUTE { + continue + } + buf := bytes.NewBuffer(m.Data[:syscall.SizeofRtMsg]) + if rerr := binary.Read(buf, cpuutil.ByteOrder(), &rtmsg); rerr != nil { + continue + } + if rtmsg.Dst_len == 0 && rtmsg.Table == syscall.RT_TABLE_MAIN { + // zero-length Dst_len implies default route + msg := m + routes[rtmsg.Family] = &msg + } + } + + if len(routes) > 0 { + return routes, nil + } + + return nil, errNoDefaultRoute +} + +// Used to get an address of interface. +func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) { + dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, int(family)) + if err != nil { + return nil, err + } + + msgs, msgErr := syscall.ParseNetlinkMessage(dat) + if msgErr != nil { + return nil, msgErr + } + + ifaddrmsg := syscall.IfAddrmsg{} + for _, m := range msgs { + if m.Header.Type != syscall.RTM_NEWADDR { + continue + } + buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg]) + if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifaddrmsg); rerr != nil { + continue + } + if ifaddrmsg.Index == idx { + return &m, nil + } + } + + return nil, fmt.Errorf("could not find address for interface index %v", idx) + +} + +// Used to get a name of interface. 
+func getIfaceLink(idx uint32) (*syscall.NetlinkMessage, error) { + dat, err := syscall.NetlinkRIB(syscall.RTM_GETLINK, syscall.AF_UNSPEC) + if err != nil { + return nil, err + } + + msgs, msgErr := syscall.ParseNetlinkMessage(dat) + if msgErr != nil { + return nil, msgErr + } + + ifinfomsg := syscall.IfInfomsg{} + for _, m := range msgs { + if m.Header.Type != syscall.RTM_NEWLINK { + continue + } + buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfInfomsg]) + if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifinfomsg); rerr != nil { + continue + } + if ifinfomsg.Index == int32(idx) { + return &m, nil + } + } + + return nil, fmt.Errorf("could not find link for interface index %v", idx) +} + +// GetDefaultInterfaces gets names of interfaces and returns a map[interface]families. +func GetDefaultInterfaces() (map[string]uint8, error) { + interfaces := make(map[string]uint8) + rmsgs, rerr := getDefaultRoutes() + if rerr != nil { + return interfaces, rerr + } + + for family, rmsg := range rmsgs { + _, oif, err := parsePREFSRC(rmsg) + if err != nil { + return interfaces, err + } + + ifmsg, ierr := getIfaceLink(oif) + if ierr != nil { + return interfaces, ierr + } + + attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg) + if aerr != nil { + return interfaces, aerr + } + + for _, attr := range attrs { + if attr.Attr.Type == syscall.IFLA_IFNAME { + // key is an interface name + // possible values: 2 - AF_INET, 10 - AF_INET6, 12 - dualstack + interfaces[string(attr.Value[:len(attr.Value)-1])] += family + } + } + } + if len(interfaces) > 0 { + return interfaces, nil + } + return interfaces, errNoDefaultInterface +} + +// parsePREFSRC returns preferred source address and output interface index (RTA_OIF). +func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) { + var attrs []syscall.NetlinkRouteAttr + attrs, err = syscall.ParseNetlinkRouteAttr(m) + if err != nil { + return "", 0, err + } + + for _, attr := range attrs { + if attr.Attr.Type == syscall.RTA_PREFSRC { + host = net.IP(attr.Value).String() + } + if attr.Attr.Type == syscall.RTA_OIF { + oif = cpuutil.ByteOrder().Uint32(attr.Value) + } + if host != "" && oif != uint32(0) { + break + } + } + + if oif == 0 { + err = errNoDefaultRoute + } + return +} diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/BUILD b/vendor/github.com/coreos/etcd/pkg/pathutil/BUILD new file mode 100644 index 000000000000..b7ca179e0740 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/pathutil/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["path.go"], + importpath = "github.com/coreos/etcd/pkg/pathutil", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go new file mode 100644 index 000000000000..f26254ba933e --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go @@ -0,0 +1,31 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
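The netlink walk above backs two public entry points, GetDefaultHost and GetDefaultInterfaces. A Linux-only sketch of how a caller might pick a default advertise address (on other platforms the stub returns an error):

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/pkg/netutil"
)

func main() {
	host, err := netutil.GetDefaultHost() // first IP on the default route, IPv4 preferred
	if err != nil {
		log.Fatal(err)
	}
	ifaces, err := netutil.GetDefaultInterfaces() // e.g. map[eth0:2], 2 meaning AF_INET
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("advertise on %s via %v\n", host, ifaces)
}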
+ +// Package pathutil implements utility functions for handling slash-separated +// paths. +package pathutil + +import "path" + +// CanonicalURLPath returns the canonical url path for p, which follows the rules: +// 1. the path always starts with "/" +// 2. replace multiple slashes with a single slash +// 3. replace each '.' '..' path name element with equivalent one +// 4. keep the trailing slash +// The function is borrowed from stdlib http.cleanPath in server.go. +func CanonicalURLPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root, + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} diff --git a/vendor/github.com/coreos/etcd/pkg/pbutil/BUILD b/vendor/github.com/coreos/etcd/pkg/pbutil/BUILD new file mode 100644 index 000000000000..3fc89789663c --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/pbutil/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["pbutil.go"], + importpath = "github.com/coreos/etcd/pkg/pbutil", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/coreos/pkg/capnslog:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go b/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go new file mode 100644 index 000000000000..d70f98dd82fe --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go @@ -0,0 +1,60 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil defines interfaces for handling Protocol Buffer objects. 
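Stepping back to pathutil for a moment: the four rules are easiest to see on concrete inputs. A quick sketch exercising CanonicalURLPath, with expected outputs in comments:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/pathutil"
)

func main() {
	fmt.Println(pathutil.CanonicalURLPath(""))           // "/"
	fmt.Println(pathutil.CanonicalURLPath("a//b"))       // "/a/b": leading slash added, doubles collapsed
	fmt.Println(pathutil.CanonicalURLPath("/a/b/../c/")) // "/a/c/": ".." resolved, trailing slash kept
}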
+package pbutil + +import "github.com/coreos/pkg/capnslog" + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/pbutil") +) + +type Marshaler interface { + Marshal() (data []byte, err error) +} + +type Unmarshaler interface { + Unmarshal(data []byte) error +} + +func MustMarshal(m Marshaler) []byte { + d, err := m.Marshal() + if err != nil { + plog.Panicf("marshal should never fail (%v)", err) + } + return d +} + +func MustUnmarshal(um Unmarshaler, data []byte) { + if err := um.Unmarshal(data); err != nil { + plog.Panicf("unmarshal should never fail (%v)", err) + } +} + +func MaybeUnmarshal(um Unmarshaler, data []byte) bool { + if err := um.Unmarshal(data); err != nil { + return false + } + return true +} + +func GetBool(v *bool) (vv bool, set bool) { + if v == nil { + return false, false + } + return *v, true +} + +func Boolp(b bool) *bool { return &b } diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/BUILD b/vendor/github.com/coreos/etcd/pkg/runtime/BUILD new file mode 100644 index 000000000000..80885d945ec4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/runtime/BUILD @@ -0,0 +1,57 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "fds_linux.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "fds_other.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "fds_other.go", + ], + "//conditions:default": [], + }), + importpath = "github.com/coreos/etcd/pkg/runtime", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go b/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go new file mode 100644 index 000000000000..8e9359db28ca --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go @@ -0,0 +1,37 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package runtime implements utility functions for runtime systems. 
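Before the runtime package below, a brief note on pbutil usage: MustMarshal and MustUnmarshal are for messages that are valid by construction, so a failure is treated as a programming error, while MaybeUnmarshal is the non-panicking probe. A sketch using a vendored raftpb message as the Marshaler (any type with Marshal/Unmarshal methods works; the field values are illustrative):

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/raft/raftpb"
)

func main() {
	e := raftpb.Entry{Term: 1, Index: 1, Data: []byte("x")}
	b := pbutil.MustMarshal(&e) // panics via plog if Marshal fails

	var out raftpb.Entry
	if ok := pbutil.MaybeUnmarshal(&out, b); !ok {
		fmt.Println("not an Entry")
		return
	}
	fmt.Println(out.Index)
}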
+package runtime + +import ( + "io/ioutil" + "syscall" +) + +func FDLimit() (uint64, error) { + var rlimit syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil { + return 0, err + } + return rlimit.Cur, nil +} + +func FDUsage() (uint64, error) { + fds, err := ioutil.ReadDir("/proc/self/fd") + if err != nil { + return 0, err + } + return uint64(len(fds)), nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go b/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go new file mode 100644 index 000000000000..0cbdb88c7a60 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go @@ -0,0 +1,30 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux + +package runtime + +import ( + "fmt" + "runtime" +) + +func FDLimit() (uint64, error) { + return 0, fmt.Errorf("cannot get FDLimit on %s", runtime.GOOS) +} + +func FDUsage() (uint64, error) { + return 0, fmt.Errorf("cannot get FDUsage on %s", runtime.GOOS) +} diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/BUILD b/vendor/github.com/coreos/etcd/pkg/schedule/BUILD new file mode 100644 index 000000000000..4b51f0f3c38e --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/schedule/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "schedule.go", + ], + importpath = "github.com/coreos/etcd/pkg/schedule", + visibility = ["//visibility:public"], + deps = ["//vendor/golang.org/x/net/context:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/doc.go b/vendor/github.com/coreos/etcd/pkg/schedule/doc.go new file mode 100644 index 000000000000..cca2c75fb6a9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/schedule/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package schedule provides mechanisms and policies for scheduling units of work. 
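As an aside on the fd helpers just shown: the typical pattern is to poll FDUsage against FDLimit and warn as the limit approaches. A minimal sketch (the 80% threshold is illustrative):

package main

import (
	"log"

	"github.com/coreos/etcd/pkg/runtime"
)

func main() {
	limit, err := runtime.FDLimit() // RLIMIT_NOFILE soft limit
	if err != nil {
		log.Fatal(err)
	}
	used, err := runtime.FDUsage() // entries in /proc/self/fd, Linux only
	if err != nil {
		log.Fatal(err)
	}
	if used > limit*8/10 {
		log.Printf("warning: %d/%d file descriptors in use", used, limit)
	}
}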
+package schedule
diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go
new file mode 100644
index 000000000000..bf8528b753a0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go
@@ -0,0 +1,166 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schedule
+
+import (
+	"sync"
+
+	"golang.org/x/net/context"
+)
+
+type Job func(context.Context)
+
+// Scheduler can schedule jobs.
+type Scheduler interface {
+	// Schedule asks the scheduler to schedule a job defined by the given func.
+	// Scheduling a job on a stopped scheduler might panic.
+	Schedule(j Job)
+
+	// Pending returns the number of pending jobs.
+	Pending() int
+
+	// Scheduled returns the number of scheduled jobs (excluding pending jobs).
+	Scheduled() int
+
+	// Finished returns the number of finished jobs.
+	Finished() int
+
+	// WaitFinish waits until at least n jobs are finished and all pending jobs
+	// are finished.
+	WaitFinish(n int)
+
+	// Stop stops the scheduler.
+	Stop()
+}
+
+type fifo struct {
+	mu sync.Mutex
+
+	resume    chan struct{}
+	scheduled int
+	finished  int
+	pendings  []Job
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	finishCond *sync.Cond
+	donec      chan struct{}
+}
+
+// NewFIFOScheduler returns a Scheduler that schedules jobs sequentially in
+// FIFO order.
+func NewFIFOScheduler() Scheduler {
+	f := &fifo{
+		resume: make(chan struct{}, 1),
+		donec:  make(chan struct{}, 1),
+	}
+	f.finishCond = sync.NewCond(&f.mu)
+	f.ctx, f.cancel = context.WithCancel(context.Background())
+	go f.run()
+	return f
+}
+
+// Schedule schedules a job that will be run sequentially in FIFO order.
+func (f *fifo) Schedule(j Job) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	if f.cancel == nil {
+		panic("schedule: schedule to stopped scheduler")
+	}
+
+	if len(f.pendings) == 0 {
+		select {
+		case f.resume <- struct{}{}:
+		default:
+		}
+	}
+	f.pendings = append(f.pendings, j)
+}
+
+func (f *fifo) Pending() int {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	return len(f.pendings)
+}
+
+func (f *fifo) Scheduled() int {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	return f.scheduled
+}
+
+func (f *fifo) Finished() int {
+	f.finishCond.L.Lock()
+	defer f.finishCond.L.Unlock()
+	return f.finished
+}
+
+func (f *fifo) WaitFinish(n int) {
+	f.finishCond.L.Lock()
+	for f.finished < n || len(f.pendings) != 0 {
+		f.finishCond.Wait()
+	}
+	f.finishCond.L.Unlock()
+}
+
+// Stop stops the scheduler and cancels all pending jobs.
+func (f *fifo) Stop() {
+	f.mu.Lock()
+	f.cancel()
+	f.cancel = nil
+	f.mu.Unlock()
+	<-f.donec
+}
+
+func (f *fifo) run() {
+	// TODO: recover from job panic?
+ defer func() { + close(f.donec) + close(f.resume) + }() + + for { + var todo Job + f.mu.Lock() + if len(f.pendings) != 0 { + f.scheduled++ + todo = f.pendings[0] + } + f.mu.Unlock() + if todo == nil { + select { + case <-f.resume: + case <-f.ctx.Done(): + f.mu.Lock() + pendings := f.pendings + f.pendings = nil + f.mu.Unlock() + // clean up pending jobs + for _, todo := range pendings { + todo(f.ctx) + } + return + } + } else { + todo(f.ctx) + f.finishCond.L.Lock() + f.finished++ + f.pendings = f.pendings[1:] + f.finishCond.Broadcast() + f.finishCond.L.Unlock() + } + } +} diff --git a/vendor/github.com/coreos/etcd/pkg/srv/BUILD b/vendor/github.com/coreos/etcd/pkg/srv/BUILD new file mode 100644 index 000000000000..3707eb3e9689 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/srv/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["srv.go"], + importpath = "github.com/coreos/etcd/pkg/srv", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/coreos/etcd/pkg/types:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/vendor/github.com/coreos/etcd/pkg/srv/srv.go new file mode 100644 index 000000000000..fefcbcb4b889 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/srv/srv.go @@ -0,0 +1,140 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package srv looks up DNS SRV records. +package srv + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict + resolveTCPAddr = net.ResolveTCPAddr +) + +// GetCluster gets the cluster information via DNS discovery. +// Also sees each entry as a separate instance. 
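The FIFO scheduler is now complete, so a short lifecycle sketch before the srv package continues below: jobs run one at a time in submission order, WaitFinish blocks until enough jobs have finished, and Stop cancels the context seen by anything still pending:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/schedule"
	"golang.org/x/net/context"
)

func main() {
	s := schedule.NewFIFOScheduler()
	for i := 0; i < 3; i++ {
		i := i
		s.Schedule(func(ctx context.Context) {
			if ctx.Err() == nil {
				fmt.Println("job", i)
			}
		})
	}
	s.WaitFinish(3) // block until all three jobs have run
	s.Stop()        // cancels ctx; still-pending jobs would see ctx.Err() != nil
}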
+func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { + tempName := int(0) + tcp2ap := make(map[string]url.URL) + + // First, resolve the apurls + for _, url := range apurls { + tcpAddr, err := resolveTCPAddr("tcp", url.Host) + if err != nil { + return nil, err + } + tcp2ap[tcpAddr.String()] = url + } + + stringParts := []string{} + updateNodeMap := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", dns) + if err != nil { + return err + } + for _, srv := range addrs { + port := fmt.Sprintf("%d", srv.Port) + host := net.JoinHostPort(srv.Target, port) + tcpAddr, terr := resolveTCPAddr("tcp", host) + if terr != nil { + err = terr + continue + } + n := "" + url, ok := tcp2ap[tcpAddr.String()] + if ok { + n = name + } + if n == "" { + n = fmt.Sprintf("%d", tempName) + tempName++ + } + // SRV records have a trailing dot but URL shouldn't. + shortHost := strings.TrimSuffix(srv.Target, ".") + urlHost := net.JoinHostPort(shortHost, port) + stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) + if ok && url.Scheme != scheme { + err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) + } + } + if len(stringParts) == 0 { + return err + } + return nil + } + + failCount := 0 + err := updateNodeMap(service+"-ssl", "https") + srvErr := make([]string, 2) + if err != nil { + srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) + failCount++ + } + err = updateNodeMap(service, "http") + if err != nil { + srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) + failCount++ + } + if failCount == 2 { + return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) + } + return stringParts, nil +} + +type SRVClients struct { + Endpoints []string + SRVs []*net.SRV +} + +// GetClient looks up the client endpoints for a service and domain. +func GetClient(service, domain string) (*SRVClients, error) { + var urls []*url.URL + var srvs []*net.SRV + + updateURLs := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", domain) + if err != nil { + return err + } + for _, srv := range addrs { + urls = append(urls, &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + }) + } + srvs = append(srvs, addrs...) 
+ return nil + } + + errHTTPS := updateURLs(service+"-ssl", "https") + errHTTP := updateURLs(service, "http") + + if errHTTPS != nil && errHTTP != nil { + return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + } + + endpoints := make([]string, len(urls)) + for i := range urls { + endpoints[i] = urls[i].String() + } + return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/BUILD b/vendor/github.com/coreos/etcd/pkg/tlsutil/BUILD new file mode 100644 index 000000000000..9ca1ad871c6f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/tlsutil/BUILD @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "tlsutil.go", + ], + importpath = "github.com/coreos/etcd/pkg/tlsutil", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go new file mode 100644 index 000000000000..3b6aa670bafe --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tlsutil provides utility functions for handling TLS. +package tlsutil diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go new file mode 100644 index 000000000000..79b1f632ed51 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go @@ -0,0 +1,72 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "io/ioutil" +) + +// NewCertPool creates x509 certPool with provided CA files. 
+func NewCertPool(CAFiles []string) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, CAFile := range CAFiles { + pemByte, err := ioutil.ReadFile(CAFile) + if err != nil { + return nil, err + } + + for { + var block *pem.Block + block, pemByte = pem.Decode(pemByte) + if block == nil { + break + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certPool.AddCert(cert) + } + } + + return certPool, nil +} + +// NewCert generates TLS cert by using the given cert,key and parse function. +func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { + cert, err := ioutil.ReadFile(certfile) + if err != nil { + return nil, err + } + + key, err := ioutil.ReadFile(keyfile) + if err != nil { + return nil, err + } + + if parseFunc == nil { + parseFunc = tls.X509KeyPair + } + + tlsCert, err := parseFunc(cert, key) + if err != nil { + return nil, err + } + return &tlsCert, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/BUILD b/vendor/github.com/coreos/etcd/pkg/transport/BUILD new file mode 100644 index 000000000000..3ae75dce137f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/BUILD @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "keepalive_listener.go", + "limit_listen.go", + "listener.go", + "listener_tls.go", + "timeout_conn.go", + "timeout_dialer.go", + "timeout_listener.go", + "timeout_transport.go", + "tls.go", + "transport.go", + "unix_listener.go", + ], + importpath = "github.com/coreos/etcd/pkg/transport", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/transport/doc.go b/vendor/github.com/coreos/etcd/pkg/transport/doc.go new file mode 100644 index 000000000000..37658ce591a4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport implements various HTTP transport utilities based on Go +// net package. 
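Closing out tlsutil before the transport package: NewCert and NewCertPool are enough to assemble a client tls.Config by hand. A sketch with hypothetical file paths:

package main

import (
	"crypto/tls"
	"log"

	"github.com/coreos/etcd/pkg/tlsutil"
)

func main() {
	// Hypothetical paths; a nil parseFunc means tls.X509KeyPair is used.
	cert, err := tlsutil.NewCert("client.crt", "client.key", nil)
	if err != nil {
		log.Fatal(err)
	}
	pool, err := tlsutil.NewCertPool([]string{"ca.crt"})
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{Certificates: []tls.Certificate{*cert}, RootCAs: pool}
	_ = cfg // hand to an http.Transport or tls.Dial
}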
+package transport diff --git a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go new file mode 100644 index 000000000000..6ccae4ee4a14 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go @@ -0,0 +1,94 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "crypto/tls" + "fmt" + "net" + "time" +) + +type keepAliveConn interface { + SetKeepAlive(bool) error + SetKeepAlivePeriod(d time.Duration) error +} + +// NewKeepAliveListener returns a listener that listens on the given address. +// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil. +// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake. +// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html +func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) { + if scheme == "https" { + if tlscfg == nil { + return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") + } + return newTLSKeepaliveListener(l, tlscfg), nil + } + + return &keepaliveListener{ + Listener: l, + }, nil +} + +type keepaliveListener struct{ net.Listener } + +func (kln *keepaliveListener) Accept() (net.Conn, error) { + c, err := kln.Listener.Accept() + if err != nil { + return nil, err + } + kac := c.(keepAliveConn) + // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl + // default on linux: 30 + 8 * 30 + // default on osx: 30 + 8 * 75 + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) + return c, nil +} + +// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. +type tlsKeepaliveListener struct { + net.Listener + config *tls.Config +} + +// Accept waits for and returns the next incoming TLS connection. +// The returned connection c is a *tls.Conn. +func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { + c, err = l.Listener.Accept() + if err != nil { + return + } + kac := c.(keepAliveConn) + // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl + // default on linux: 30 + 8 * 30 + // default on osx: 30 + 8 * 75 + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) + c = tls.Server(c, l.config) + return +} + +// NewListener creates a Listener which accepts connections from an inner +// Listener and wraps each connection with Server. +// The configuration config must be non-nil and must have +// at least one certificate. 
+func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener { + l := &tlsKeepaliveListener{} + l.Listener = inner + l.config = config + return l +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go new file mode 100644 index 000000000000..930c542066f8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go @@ -0,0 +1,80 @@ +// Copyright 2013 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides network utility functions, complementing the more +// common ones in the net package. +package transport + +import ( + "errors" + "net" + "sync" + "time" +) + +var ( + ErrNotTCP = errors.New("only tcp connections have keepalive") +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. +func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} + +func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlive(doKeepAlive) +} + +func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlivePeriod(d) +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go new file mode 100644 index 000000000000..3b58b41543f2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener.go @@ -0,0 +1,260 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
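Since LimitListener wraps any net.Listener, capping concurrent connections on an HTTP server is a one-line change. A sketch (the cap of 1000 and the address are illustrative):

package main

import (
	"log"
	"net"
	"net/http"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	// At most 1000 connections are open at once; the 1001st Accept
	// blocks until an earlier connection is closed.
	ln = transport.LimitListener(ln, 1000)
	log.Fatal(http.Serve(ln, http.NotFoundHandler()))
}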
+ +package transport + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "os" + "path/filepath" + "strings" + "time" + + "github.com/coreos/etcd/pkg/tlsutil" +) + +func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { + if l, err = newListener(addr, scheme); err != nil { + return nil, err + } + return wrapTLS(addr, scheme, tlsinfo, l) +} + +func newListener(addr string, scheme string) (net.Listener, error) { + if scheme == "unix" || scheme == "unixs" { + // unix sockets via unix://laddr + return NewUnixListener(addr) + } + return net.Listen("tcp", addr) +} + +func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { + if scheme != "https" && scheme != "unixs" { + return l, nil + } + return newTLSListener(l, tlsinfo) +} + +type TLSInfo struct { + CertFile string + KeyFile string + CAFile string + TrustedCAFile string + ClientCertAuth bool + + // ServerName ensures the cert matches the given host in case of discovery / virtual hosting + ServerName string + + // HandshakeFailure is optionally called when a connection fails to handshake. The + // connection will be closed immediately afterwards. + HandshakeFailure func(*tls.Conn, error) + + selfCert bool + + // parseFunc exists to simplify testing. Typically, parseFunc + // should be left nil. In that case, tls.X509KeyPair will be used. + parseFunc func([]byte, []byte) (tls.Certificate, error) +} + +func (info TLSInfo) String() string { + return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth) +} + +func (info TLSInfo) Empty() bool { + return info.CertFile == "" && info.KeyFile == "" +} + +func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { + if err = os.MkdirAll(dirpath, 0700); err != nil { + return + } + + certPath := filepath.Join(dirpath, "cert.pem") + keyPath := filepath.Join(dirpath, "key.pem") + _, errcert := os.Stat(certPath) + _, errkey := os.Stat(keyPath) + if errcert == nil && errkey == nil { + info.CertFile = certPath + info.KeyFile = keyPath + info.selfCert = true + return + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return + } + + tmpl := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{Organization: []string{"etcd"}}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * (24 * time.Hour)), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + for _, host := range hosts { + h, _, _ := net.SplitHostPort(host) + if ip := net.ParseIP(h); ip != nil { + tmpl.IPAddresses = append(tmpl.IPAddresses, ip) + } else { + tmpl.DNSNames = append(tmpl.DNSNames, h) + } + } + + priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) + if err != nil { + return + } + + certOut, err := os.Create(certPath) + if err != nil { + return + } + pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + certOut.Close() + + b, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return + } + keyOut, err := os.OpenFile(keyPath, 
os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return + } + pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) + keyOut.Close() + + return SelfCert(dirpath, hosts) +} + +func (info TLSInfo) baseConfig() (*tls.Config, error) { + if info.KeyFile == "" || info.CertFile == "" { + return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) + } + + tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if err != nil { + return nil, err + } + + cfg := &tls.Config{ + Certificates: []tls.Certificate{*tlsCert}, + MinVersion: tls.VersionTLS12, + ServerName: info.ServerName, + } + // this only reloads certs when there's a client request + // TODO: support server-side refresh (e.g. inotify, SIGHUP), caching + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } + return cfg, nil +} + +// cafiles returns a list of CA file paths. +func (info TLSInfo) cafiles() []string { + cs := make([]string, 0) + if info.CAFile != "" { + cs = append(cs, info.CAFile) + } + if info.TrustedCAFile != "" { + cs = append(cs, info.TrustedCAFile) + } + return cs +} + +// ServerConfig generates a tls.Config object for use by an HTTP server. +func (info TLSInfo) ServerConfig() (*tls.Config, error) { + cfg, err := info.baseConfig() + if err != nil { + return nil, err + } + + cfg.ClientAuth = tls.NoClientCert + if info.CAFile != "" || info.ClientCertAuth { + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + CAFiles := info.cafiles() + if len(CAFiles) > 0 { + cp, err := tlsutil.NewCertPool(CAFiles) + if err != nil { + return nil, err + } + cfg.ClientCAs = cp + } + + // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server + cfg.NextProtos = []string{"h2"} + + return cfg, nil +} + +// ClientConfig generates a tls.Config object for use by an HTTP client. +func (info TLSInfo) ClientConfig() (*tls.Config, error) { + var cfg *tls.Config + var err error + + if !info.Empty() { + cfg, err = info.baseConfig() + if err != nil { + return nil, err + } + } else { + cfg = &tls.Config{ServerName: info.ServerName} + } + + CAFiles := info.cafiles() + if len(CAFiles) > 0 { + cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles) + if err != nil { + return nil, err + } + } + + if info.selfCert { + cfg.InsecureSkipVerify = true + } + return cfg, nil +} + +// IsClosedConnError returns true if the error is from closing listener, cmux. +// copied from golang.org/x/net/http2/http2.go +func IsClosedConnError(err error) bool { + // 'use of closed network connection' (Go <=1.8) + // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) + // 'mux: listener closed' (cmux.ErrListenerClosed) + return err != nil && strings.Contains(err.Error(), "closed") +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go new file mode 100644 index 000000000000..86511860335a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go @@ -0,0 +1,217 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "strings" + "sync" +) + +// tlsListener overrides a TLS listener so it will reject client +// certificates with insufficient SAN credentials. +type tlsListener struct { + net.Listener + connc chan net.Conn + donec chan struct{} + err error + handshakeFailure func(*tls.Conn, error) +} + +func newTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { + if tlsinfo == nil || tlsinfo.Empty() { + l.Close() + return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) + } + tlscfg, err := tlsinfo.ServerConfig() + if err != nil { + return nil, err + } + + hf := tlsinfo.HandshakeFailure + if hf == nil { + hf = func(*tls.Conn, error) {} + } + tlsl := &tlsListener{ + Listener: tls.NewListener(l, tlscfg), + connc: make(chan net.Conn), + donec: make(chan struct{}), + handshakeFailure: hf, + } + go tlsl.acceptLoop() + return tlsl, nil +} + +func (l *tlsListener) Accept() (net.Conn, error) { + select { + case conn := <-l.connc: + return conn, nil + case <-l.donec: + return nil, l.err + } +} + +// acceptLoop launches each TLS handshake in a separate goroutine +// to prevent a hanging TLS connection from blocking other connections. +func (l *tlsListener) acceptLoop() { + var wg sync.WaitGroup + var pendingMu sync.Mutex + + pending := make(map[net.Conn]struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + pendingMu.Lock() + for c := range pending { + c.Close() + } + pendingMu.Unlock() + wg.Wait() + close(l.donec) + }() + + for { + conn, err := l.Listener.Accept() + if err != nil { + l.err = err + return + } + + pendingMu.Lock() + pending[conn] = struct{}{} + pendingMu.Unlock() + + wg.Add(1) + go func() { + defer func() { + if conn != nil { + conn.Close() + } + wg.Done() + }() + + tlsConn := conn.(*tls.Conn) + herr := tlsConn.Handshake() + pendingMu.Lock() + delete(pending, conn) + pendingMu.Unlock() + if herr != nil { + l.handshakeFailure(tlsConn, herr) + return + } + + st := tlsConn.ConnectionState() + if len(st.PeerCertificates) > 0 { + cert := st.PeerCertificates[0] + addr := tlsConn.RemoteAddr().String() + if cerr := checkCert(ctx, cert, addr); cerr != nil { + l.handshakeFailure(tlsConn, cerr) + return + } + } + select { + case l.connc <- tlsConn: + conn = nil + case <-ctx.Done(): + } + }() + } +} + +func checkCert(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { + h, _, herr := net.SplitHostPort(remoteAddr) + if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { + return nil + } + if herr != nil { + return herr + } + if len(cert.IPAddresses) > 0 { + cerr := cert.VerifyHostname(h) + if cerr == nil { + return nil + } + if len(cert.DNSNames) == 0 { + return cerr + } + } + if len(cert.DNSNames) > 0 { + ok, err := isHostInDNS(ctx, h, cert.DNSNames) + if ok { + return nil + } + errStr := "" + if err != nil { + errStr = " (" + err.Error() + ")" + } + return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) + } + return nil +} + 
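checkCert above runs during every accepted handshake when a client certificate is presented. Callers reach this path through NewListener with a populated TLSInfo; a sketch with hypothetical certificate paths:

package main

import (
	"log"
	"net/http"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	tlsinfo := transport.TLSInfo{
		CertFile:       "server.crt", // hypothetical paths
		KeyFile:        "server.key",
		TrustedCAFile:  "ca.crt",
		ClientCertAuth: true, // forces RequireAndVerifyClientCert
	}
	ln, err := transport.NewListener("127.0.0.1:2379", "https", &tlsinfo)
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(ln, http.NotFoundHandler()))
}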
+func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { + // reverse lookup + wildcards, names := []string{}, []string{} + for _, dns := range dnsNames { + if strings.HasPrefix(dns, "*.") { + wildcards = append(wildcards, dns[1:]) + } else { + names = append(names, dns) + } + } + lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) + for _, name := range lnames { + // strip trailing '.' from PTR record + if name[len(name)-1] == '.' { + name = name[:len(name)-1] + } + for _, wc := range wildcards { + if strings.HasSuffix(name, wc) { + return true, nil + } + } + for _, n := range names { + if n == name { + return true, nil + } + } + } + err = lerr + + // forward lookup + for _, dns := range names { + addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) + if lerr != nil { + err = lerr + continue + } + for _, addr := range addrs { + if addr == host { + return true, nil + } + } + } + return false, err +} + +func (l *tlsListener) Close() error { + err := l.Listener.Close() + <-l.donec + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go new file mode 100644 index 000000000000..7e8c02030fed --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go @@ -0,0 +1,44 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "time" +) + +type timeoutConn struct { + net.Conn + wtimeoutd time.Duration + rdtimeoutd time.Duration +} + +func (c timeoutConn) Write(b []byte) (n int, err error) { + if c.wtimeoutd > 0 { + if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil { + return 0, err + } + } + return c.Conn.Write(b) +} + +func (c timeoutConn) Read(b []byte) (n int, err error) { + if c.rdtimeoutd > 0 { + if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil { + return 0, err + } + } + return c.Conn.Read(b) +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go new file mode 100644 index 000000000000..6ae39ecfc9b3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go @@ -0,0 +1,36 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "net" + "time" +) + +type rwTimeoutDialer struct { + wtimeoutd time.Duration + rdtimeoutd time.Duration + net.Dialer +} + +func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) { + conn, err := d.Dialer.Dial(network, address) + tconn := &timeoutConn{ + rdtimeoutd: d.rdtimeoutd, + wtimeoutd: d.wtimeoutd, + Conn: conn, + } + return tconn, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go new file mode 100644 index 000000000000..b35e04955bb0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go @@ -0,0 +1,57 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "time" +) + +// NewTimeoutListener returns a listener that listens on the given address. +// If read/write on the accepted connection blocks longer than its time limit, +// it will return timeout error. +func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { + ln, err := newListener(addr, scheme) + if err != nil { + return nil, err + } + ln = &rwTimeoutListener{ + Listener: ln, + rdtimeoutd: rdtimeoutd, + wtimeoutd: wtimeoutd, + } + if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil { + return nil, err + } + return ln, nil +} + +type rwTimeoutListener struct { + net.Listener + wtimeoutd time.Duration + rdtimeoutd time.Duration +} + +func (rwln *rwTimeoutListener) Accept() (net.Conn, error) { + c, err := rwln.Listener.Accept() + if err != nil { + return nil, err + } + return timeoutConn{ + Conn: c, + wtimeoutd: rwln.wtimeoutd, + rdtimeoutd: rwln.rdtimeoutd, + }, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go new file mode 100644 index 000000000000..ea16b4c0f869 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go @@ -0,0 +1,51 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "net/http" + "time" +) + +// NewTimeoutTransport returns a transport created using the given TLS info. +// If read/write on the created connection blocks longer than its time limit, +// it will return timeout error. 
+// If read/write timeout is set, transport will not be able to reuse connection. +func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) { + tr, err := NewTransport(info, dialtimeoutd) + if err != nil { + return nil, err + } + + if rdtimeoutd != 0 || wtimeoutd != 0 { + // the timed out connection will timeout soon after it is idle. + // it should not be put back to http transport as an idle connection for future usage. + tr.MaxIdleConnsPerHost = -1 + } else { + // allow more idle connections between peers to avoid unnecessary port allocation. + tr.MaxIdleConnsPerHost = 1024 + } + + tr.Dial = (&rwTimeoutDialer{ + Dialer: net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }, + rdtimeoutd: rdtimeoutd, + wtimeoutd: wtimeoutd, + }).Dial + return tr, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/tls.go b/vendor/github.com/coreos/etcd/pkg/transport/tls.go new file mode 100644 index 000000000000..62fe0d385195 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/tls.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "strings" + "time" +) + +// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those +// endpoints that could be validated as secure. +func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { + t, err := NewTransport(tlsInfo, 5*time.Second) + if err != nil { + return nil, err + } + var errs []string + var endpoints []string + for _, ep := range eps { + if !strings.HasPrefix(ep, "https://") { + errs = append(errs, fmt.Sprintf("%q is insecure", ep)) + continue + } + conn, cerr := t.Dial("tcp", ep[len("https://"):]) + if cerr != nil { + errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr)) + continue + } + conn.Close() + endpoints = append(endpoints, ep) + } + if len(errs) != 0 { + err = fmt.Errorf("%s", strings.Join(errs, ",")) + } + return endpoints, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/vendor/github.com/coreos/etcd/pkg/transport/transport.go new file mode 100644 index 000000000000..4a7fe69d2e19 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/transport.go @@ -0,0 +1,71 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
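Hypothetical usage of the two helpers added above, `NewTimeoutTransport` and `ValidateSecureEndpoints`; the endpoints and the empty `TLSInfo` are placeholders, and the import path is the upstream (non-vendored) one:

```go
package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	var tlsInfo transport.TLSInfo // placeholder; real use would set cert/key paths

	// Reads or writes that stall for more than a second fail with a
	// timeout error; idle-connection reuse is disabled, per the comment above.
	tr, err := transport.NewTimeoutTransport(tlsInfo, time.Second, time.Second, time.Second)
	if err != nil {
		panic(err)
	}
	_ = tr // e.g. &http.Client{Transport: tr}

	// Keep only the endpoints that are https and actually dialable.
	secure, err := transport.ValidateSecureEndpoints(tlsInfo, []string{
		"https://10.0.0.1:2379",
		"http://10.0.0.2:2379", // reported as insecure
	})
	fmt.Println(secure, err)
}
```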
+ +package transport + +import ( + "net" + "net/http" + "strings" + "time" +) + +type unixTransport struct{ *http.Transport } + +func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) { + cfg, err := info.ClientConfig() + if err != nil { + return nil, err + } + + t := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: dialtimeoutd, + // value taken from http.DefaultTransport + KeepAlive: 30 * time.Second, + }).Dial, + // value taken from http.DefaultTransport + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + } + + dialer := (&net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }) + dial := func(net, addr string) (net.Conn, error) { + return dialer.Dial("unix", addr) + } + + tu := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + } + ut := &unixTransport{tu} + + t.RegisterProtocol("unix", ut) + t.RegisterProtocol("unixs", ut) + + return t, nil +} + +func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) { + url := *req.URL + req.URL = &url + req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) + return urt.Transport.RoundTrip(req) +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go new file mode 100644 index 000000000000..123e2036f0f0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go @@ -0,0 +1,40 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "net" + "os" +) + +type unixListener struct{ net.Listener } + +func NewUnixListener(addr string) (net.Listener, error) { + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + return nil, err + } + l, err := net.Listen("unix", addr) + if err != nil { + return nil, err + } + return &unixListener{l}, nil +} + +func (ul *unixListener) Close() error { + if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { + return err + } + return ul.Listener.Close() +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/BUILD b/vendor/github.com/coreos/etcd/pkg/types/BUILD new file mode 100644 index 000000000000..e299fd1194f6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "id.go", + "set.go", + "slice.go", + "urls.go", + "urlsmap.go", + ], + importpath = "github.com/coreos/etcd/pkg/types", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go new file mode 100644 index 000000000000..de8ef0bd7126 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types declares various data types and implements type-checking +// functions. +package types diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go new file mode 100644 index 000000000000..1b042d9ce652 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/id.go @@ -0,0 +1,41 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
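`NewUnixListener` above removes any stale socket file both when the listener is opened and again on Close. A small usage sketch (the socket path is illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	// Any stale socket file is removed on start and again on Close.
	ln, err := transport.NewUnixListener("/tmp/demo.sock")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello over a unix socket")
	}))
}
```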
+ +package types + +import ( + "strconv" +) + +// ID represents a generic identifier which is canonically +// stored as a uint64 but is typically represented as a +// base-16 string for input/output +type ID uint64 + +func (i ID) String() string { + return strconv.FormatUint(uint64(i), 16) +} + +// IDFromString attempts to create an ID from a base-16 string. +func IDFromString(s string) (ID, error) { + i, err := strconv.ParseUint(s, 16, 64) + return ID(i), err +} + +// IDSlice implements the sort interface +type IDSlice []ID + +func (p IDSlice) Len() int { return len(p) } +func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } +func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go new file mode 100644 index 000000000000..73ef431bef1f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/set.go @@ -0,0 +1,178 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + "sort" + "sync" +) + +type Set interface { + Add(string) + Remove(string) + Contains(string) bool + Equals(Set) bool + Length() int + Values() []string + Copy() Set + Sub(Set) Set +} + +func NewUnsafeSet(values ...string) *unsafeSet { + set := &unsafeSet{make(map[string]struct{})} + for _, v := range values { + set.Add(v) + } + return set +} + +func NewThreadsafeSet(values ...string) *tsafeSet { + us := NewUnsafeSet(values...) + return &tsafeSet{us, sync.RWMutex{}} +} + +type unsafeSet struct { + d map[string]struct{} +} + +// Add adds a new value to the set (no-op if the value is already present) +func (us *unsafeSet) Add(value string) { + us.d[value] = struct{}{} +} + +// Remove removes the given value from the set +func (us *unsafeSet) Remove(value string) { + delete(us.d, value) +} + +// Contains returns whether the set contains the given value +func (us *unsafeSet) Contains(value string) (exists bool) { + _, exists = us.d[value] + return +} + +// ContainsAll returns whether the set contains all given values +func (us *unsafeSet) ContainsAll(values []string) bool { + for _, s := range values { + if !us.Contains(s) { + return false + } + } + return true +} + +// Equals returns whether the contents of two sets are identical +func (us *unsafeSet) Equals(other Set) bool { + v1 := sort.StringSlice(us.Values()) + v2 := sort.StringSlice(other.Values()) + v1.Sort() + v2.Sort() + return reflect.DeepEqual(v1, v2) +} + +// Length returns the number of elements in the set +func (us *unsafeSet) Length() int { + return len(us.d) +} + +// Values returns the values of the Set in an unspecified order. 
+func (us *unsafeSet) Values() (values []string) { + values = make([]string, 0) + for val := range us.d { + values = append(values, val) + } + return +} + +// Copy creates a new Set containing the values of the first +func (us *unsafeSet) Copy() Set { + cp := NewUnsafeSet() + for val := range us.d { + cp.Add(val) + } + + return cp +} + +// Sub removes all elements in other from the set +func (us *unsafeSet) Sub(other Set) Set { + oValues := other.Values() + result := us.Copy().(*unsafeSet) + + for _, val := range oValues { + if _, ok := result.d[val]; !ok { + continue + } + delete(result.d, val) + } + + return result +} + +type tsafeSet struct { + us *unsafeSet + m sync.RWMutex +} + +func (ts *tsafeSet) Add(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Add(value) +} + +func (ts *tsafeSet) Remove(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Remove(value) +} + +func (ts *tsafeSet) Contains(value string) (exists bool) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Contains(value) +} + +func (ts *tsafeSet) Equals(other Set) bool { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Equals(other) +} + +func (ts *tsafeSet) Length() int { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Length() +} + +func (ts *tsafeSet) Values() (values []string) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Values() +} + +func (ts *tsafeSet) Copy() Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Copy().(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} + +func (ts *tsafeSet) Sub(other Set) Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Sub(other).(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go new file mode 100644 index 000000000000..0dd9ca798ae4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/slice.go @@ -0,0 +1,22 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// Uint64Slice implements sort interface +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go new file mode 100644 index 000000000000..9e5d03ff6457 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urls.go @@ -0,0 +1,82 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
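A quick tour of the `types` helpers defined so far, `ID` and the two `Set` flavors (expected output noted in comments):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	// IDs round-trip through their base-16 string form.
	id, err := types.IDFromString("3eb8f1352f84e674")
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // 3eb8f1352f84e674

	// The thread-safe set is the unsafe one behind an RWMutex.
	s := types.NewThreadsafeSet("a", "b")
	s.Add("c")
	fmt.Println(s.Contains("b"), s.Length()) // true 3
}
```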
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "fmt" + "net" + "net/url" + "sort" + "strings" +) + +type URLs []url.URL + +func NewURLs(strs []string) (URLs, error) { + all := make([]url.URL, len(strs)) + if len(all) == 0 { + return nil, errors.New("no valid URLs given") + } + for i, in := range strs { + in = strings.TrimSpace(in) + u, err := url.Parse(in) + if err != nil { + return nil, err + } + if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { + return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) + } + if _, _, err := net.SplitHostPort(u.Host); err != nil { + return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) + } + if u.Path != "" { + return nil, fmt.Errorf("URL must not contain a path: %s", in) + } + all[i] = *u + } + us := URLs(all) + us.Sort() + + return us, nil +} + +func MustNewURLs(strs []string) URLs { + urls, err := NewURLs(strs) + if err != nil { + panic(err) + } + return urls +} + +func (us URLs) String() string { + return strings.Join(us.StringSlice(), ",") +} + +func (us *URLs) Sort() { + sort.Sort(us) +} +func (us URLs) Len() int { return len(us) } +func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } +func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } + +func (us URLs) StringSlice() []string { + out := make([]string, len(us)) + for i := range us { + out[i] = us[i].String() + } + + return out +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go new file mode 100644 index 000000000000..47690cc381a3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go @@ -0,0 +1,107 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "sort" + "strings" +) + +// URLsMap is a map from a name to its URLs. +type URLsMap map[string]URLs + +// NewURLsMap returns a URLsMap instantiated from the given string, +// which consists of discovery-formatted names-to-URLs, like: +// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 +func NewURLsMap(s string) (URLsMap, error) { + m := parse(s) + + cl := URLsMap{} + for name, urls := range m { + us, err := NewURLs(urls) + if err != nil { + return nil, err + } + cl[name] = us + } + return cl, nil +} + +// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The +// string values in the map can be multiple values separated by the sep string. 
+func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { + var err error + um := URLsMap{} + for k, v := range m { + um[k], err = NewURLs(strings.Split(v, sep)) + if err != nil { + return nil, err + } + } + return um, nil +} + +// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. +func (c URLsMap) String() string { + var pairs []string + for name, urls := range c { + for _, url := range urls { + pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) + } + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +// URLs returns a list of all URLs. +// The returned list is sorted in ascending lexicographical order. +func (c URLsMap) URLs() []string { + var urls []string + for _, us := range c { + for _, u := range us { + urls = append(urls, u.String()) + } + } + sort.Strings(urls) + return urls +} + +// Len returns the size of URLsMap. +func (c URLsMap) Len() int { + return len(c) +} + +// parse parses the given string and returns a map listing the values specified for each key. +func parse(s string) map[string][]string { + m := make(map[string][]string) + for s != "" { + key := s + if i := strings.IndexAny(key, ","); i >= 0 { + key, s = key[:i], key[i+1:] + } else { + s = "" + } + if key == "" { + continue + } + value := "" + if i := strings.Index(key, "="); i >= 0 { + key, value = key[:i], key[i+1:] + } + m[key] = append(m[key], value) + } + return m +} diff --git a/vendor/github.com/coreos/etcd/pkg/wait/BUILD b/vendor/github.com/coreos/etcd/pkg/wait/BUILD new file mode 100644 index 000000000000..11aa1e3b87c8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/wait/BUILD @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "wait.go", + "wait_time.go", + ], + importpath = "github.com/coreos/etcd/pkg/wait", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait.go b/vendor/github.com/coreos/etcd/pkg/wait/wait.go new file mode 100644 index 000000000000..34fa237e825b --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/wait/wait.go @@ -0,0 +1,91 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package wait provides utility functions for polling, listening using Go +// channel. +package wait + +import ( + "log" + "sync" +) + +// Wait is an interface that provides the ability to wait and trigger events that +// are associated with IDs. +type Wait interface { + // Register waits returns a chan that waits on the given ID. + // The chan will be triggered when Trigger is called with + // the same ID. 
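`URLsMap` and its `parse` helper accept the discovery-style `name=url` string shown in the `NewURLsMap` doc comment. A usage sketch with made-up member names and addresses:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	m, err := types.NewURLsMap("infra0=http://10.0.0.1:2380,infra1=http://10.0.0.2:2380")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Len())    // 2
	fmt.Println(m.String()) // round-trips, sorted by name
	fmt.Println(m.URLs())   // all URLs, sorted lexicographically
}
```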
+ Register(id uint64) <-chan interface{} + // Trigger triggers the waiting chans with the given ID. + Trigger(id uint64, x interface{}) + IsRegistered(id uint64) bool +} + +type list struct { + l sync.Mutex + m map[uint64]chan interface{} +} + +// New creates a Wait. +func New() Wait { + return &list{m: make(map[uint64]chan interface{})} +} + +func (w *list) Register(id uint64) <-chan interface{} { + w.l.Lock() + defer w.l.Unlock() + ch := w.m[id] + if ch == nil { + ch = make(chan interface{}, 1) + w.m[id] = ch + } else { + log.Panicf("dup id %x", id) + } + return ch +} + +func (w *list) Trigger(id uint64, x interface{}) { + w.l.Lock() + ch := w.m[id] + delete(w.m, id) + w.l.Unlock() + if ch != nil { + ch <- x + close(ch) + } +} + +func (w *list) IsRegistered(id uint64) bool { + w.l.Lock() + defer w.l.Unlock() + _, ok := w.m[id] + return ok +} + +type waitWithResponse struct { + ch <-chan interface{} +} + +func NewWithResponse(ch <-chan interface{}) Wait { + return &waitWithResponse{ch: ch} +} + +func (w *waitWithResponse) Register(id uint64) <-chan interface{} { + return w.ch +} +func (w *waitWithResponse) Trigger(id uint64, x interface{}) {} +func (w *waitWithResponse) IsRegistered(id uint64) bool { + panic("waitWithResponse.IsRegistered() shouldn't be called") +} diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go b/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go new file mode 100644 index 000000000000..297e48a47d76 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go @@ -0,0 +1,66 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wait + +import "sync" + +type WaitTime interface { + // Wait returns a chan that waits on the given logical deadline. + // The chan will be triggered when Trigger is called with a + // deadline that is later than the one it is waiting for. + Wait(deadline uint64) <-chan struct{} + // Trigger triggers all the waiting chans with an earlier logical deadline. 
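The `wait` package above is a small ID-to-channel rendezvous, used inside etcd to pair a proposal with its eventual apply result. A self-contained sketch:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/wait"
)

func main() {
	w := wait.New()

	// Register a channel under an ID...
	ch := w.Register(42)

	// ...and complete it later with an arbitrary payload. The channel
	// has capacity 1, so triggering before the receive is fine.
	w.Trigger(42, "applied")
	fmt.Println(<-ch) // applied
}
```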
+ Trigger(deadline uint64) +} + +var closec chan struct{} + +func init() { closec = make(chan struct{}); close(closec) } + +type timeList struct { + l sync.Mutex + lastTriggerDeadline uint64 + m map[uint64]chan struct{} +} + +func NewTimeList() *timeList { + return &timeList{m: make(map[uint64]chan struct{})} +} + +func (tl *timeList) Wait(deadline uint64) <-chan struct{} { + tl.l.Lock() + defer tl.l.Unlock() + if tl.lastTriggerDeadline >= deadline { + return closec + } + ch := tl.m[deadline] + if ch == nil { + ch = make(chan struct{}) + tl.m[deadline] = ch + } + return ch +} + +func (tl *timeList) Trigger(deadline uint64) { + tl.l.Lock() + defer tl.l.Unlock() + tl.lastTriggerDeadline = deadline + for t, ch := range tl.m { + if t <= deadline { + delete(tl.m, t) + close(ch) + } + } +} diff --git a/vendor/github.com/coreos/etcd/raft/BUILD b/vendor/github.com/coreos/etcd/raft/BUILD new file mode 100644 index 000000000000..7cc202cf5cf8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "log.go", + "log_unstable.go", + "logger.go", + "node.go", + "progress.go", + "raft.go", + "rawnode.go", + "read_only.go", + "status.go", + "storage.go", + "util.go", + ], + importpath = "github.com/coreos/etcd/raft", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/raft/raftpb:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md new file mode 100644 index 000000000000..f485b839771b --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/README.md @@ -0,0 +1,196 @@ +# Raft library + +Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. +The state machine is kept in sync through the use of a replicated log. +For more details on Raft, see "In Search of an Understandable Consensus Algorithm" +(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout. + +This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more. + +Most Raft implementations have a monolithic design, including storage handling, messaging serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance. + +To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transportation layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state. 
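Since both network and disk IO are left to the user, the minimum the application owes the library is a way to ship `raftpb.Message`s between peers. A rough sketch of such a transport, with everything here (the peer table, the frame-less TCP write) purely illustrative:

```go
package main

import (
	"net"

	"github.com/coreos/etcd/raft/raftpb"
)

// peers is an application-owned registry of node ID to address.
var peers = map[uint64]string{2: "10.0.0.2:2380", 3: "10.0.0.3:2380"}

// send moves raft's outbound messages over the wire; a real transport
// would reuse connections, frame messages, and report send failures
// back to raft.
func send(msgs []raftpb.Message) {
	for _, m := range msgs {
		addr, ok := peers[m.To]
		if !ok {
			continue
		}
		data, err := m.Marshal() // raftpb messages are protobufs
		if err != nil {
			continue
		}
		conn, err := net.Dial("tcp", addr)
		if err != nil {
			continue
		}
		conn.Write(data)
		conn.Close()
	}
}

func main() { send(nil) }
```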
+
+In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
+
+A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample
+
+# Features
+
+This raft implementation is a full-featured implementation of the Raft protocol. Features include:
+
+- Leader election
+- Log replication
+- Log compaction
+- Membership changes
+- Leadership transfer extension
+- Efficient linearizable read-only queries served by both the leader and followers
+  - the leader checks with a quorum and bypasses the Raft log before processing read-only queries
+  - followers ask the leader for a safe read index before processing read-only queries
+- More efficient lease-based linearizable read-only queries served by both the leader and followers
+  - the leader bypasses the Raft log and processes read-only queries locally
+  - followers ask the leader for a safe read index before processing read-only queries
+  - this approach relies on the clocks of all the machines in the raft group
+
+This raft implementation also includes a few optional enhancements:
+
+- Optimistic pipelining to reduce log replication latency
+- Flow control for log replication
+- Batching Raft messages to reduce synchronized network I/O calls
+- Batching log entries to reduce synchronized disk I/O
+- Writing to the leader's disk in parallel
+- Internal proposal redirection from followers to the leader
+- Automatic stepping down when the leader loses quorum
+
+## Notable Users
+
+- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
+- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
+- [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store
+- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
+- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
+- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
+
+## Usage
+
+The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a three-node cluster:
+```go
+  storage := raft.NewMemoryStorage()
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+  // Set peer list to the other nodes in the cluster.
+  // Note that they need to be started separately as well.
+  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+```
+
+Start a single-node cluster, like so:
+```go
+  // Create storage and config as shown above.
+  // Set peer list to itself, so this node can become the leader of this single-node cluster.
+ peers := []raft.Peer{{ID: 0x01}} + n := raft.StartNode(c, peers) +``` + +To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so: +```go + // Create storage and config as shown above. + n := raft.StartNode(c, nil) +``` + +To restart a node from previous state: +```go + storage := raft.NewMemoryStorage() + + // Recover the in-memory storage from persistent snapshot, state and entries. + storage.ApplySnapshot(snapshot) + storage.SetHardState(state) + storage.Append(entries) + + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + + // Restart raft without peer information. + // Peer information is already included in the storage. + n := raft.RestartNode(c) +``` + +After creating a Node, the user has a few responsibilities: + +First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2. + +1. Write HardState, Entries, and Snapshot to persistent storage if they are not empty. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded. + +2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop. + +3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node). + +4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready. + +Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied. + +Third, after receiving a message from another node, pass it to Node.Step: + +```go + func recvRaftRPC(ctx context.Context, m raftpb.Message) { + n.Step(ctx, m) + } +``` + +Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick". 
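For example, a plain `time.Ticker` is enough to drive the logical clock; the 100ms interval is illustrative, and the snippet continues the `n` from the examples above:

```go
  ticker := time.NewTicker(100 * time.Millisecond)
  defer ticker.Stop()
  for range ticker.C {
    n.Tick()
  }
```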
+
+The total state machine handling loop will look something like this:
+
+```go
+  for {
+    select {
+    case <-s.Ticker:
+      n.Tick()
+    case rd := <-s.Node.Ready():
+      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+      send(rd.Messages)
+      if !raft.IsEmptySnap(rd.Snapshot) {
+        processSnapshot(rd.Snapshot)
+      }
+      for _, entry := range rd.CommittedEntries {
+        process(entry)
+        if entry.Type == raftpb.EntryConfChange {
+          var cc raftpb.ConfChange
+          cc.Unmarshal(entry.Data)
+          s.Node.ApplyConfChange(cc)
+        }
+      }
+      s.Node.Advance()
+    case <-s.done:
+      return
+    }
+  }
+```
+
+To propose changes to the state machine, serialize the application data into a byte slice and call:
+
+```go
+  n.Propose(ctx, data)
+```
+
+If the proposal is committed, the data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout.
+
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+```go
+  n.ProposeConfChange(ctx, cc)
+```
+
+After the config change is committed, a committed entry with type raftpb.EntryConfChange will be returned. It must be applied to the node through:
+
+```go
+  var cc raftpb.ConfChange
+  cc.Unmarshal(data)
+  n.ApplyConfChange(cc)
+```
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+## Implementation notes
+
+This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
+
+To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
+
+This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
diff --git a/vendor/github.com/coreos/etcd/raft/design.md b/vendor/github.com/coreos/etcd/raft/design.md
new file mode 100644
index 000000000000..7bc0531dce6f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/design.md
@@ -0,0 +1,57 @@
+## Progress
+
+Progress represents a follower's progress in the view of the leader. The leader maintains the progress of every follower, and sends `replication message`s to a follower based on its progress.
+
+A `replication message` is a `msgApp` with log entries.
+
+A progress has two attributes: `match` and `next`. `match` is the index of the highest known matched entry. If the leader knows nothing about the follower's replication status, `match` is set to zero.
`next` is the index of the first entry that will be replicated to the follower. The leader puts entries from `next` up to its latest one in the next `replication message`.
+
+A progress is in one of three states: `probe`, `replicate`, `snapshot`.
+
+```
+                            +--------------------------------------------------------+
+                            |                  send snapshot                         |
+                            |                                                        |
+                  +---------+----------+                                  +----------v---------+
+              +--->       probe        |                                  |      snapshot      |
+              |   |  max inflight = 1  <----------------------------------+  max inflight = 0  |
+              |   +---------+----------+                                  +--------------------+
+              |             |  1. snapshot success
+              |             |     (next=snapshot.index + 1)
+              |             |  2. snapshot failure
+              |             |     (no change)
+              |             |  3. receives msgAppResp(rej=false&&index>lastsnap.index)
+              |             |     (match=m.index,next=match+1)
+receives msgAppResp(rej=true)
+(next=match+1)|             |
+              |             |
+              |             |
+              |             |   receives msgAppResp(rej=false&&index>match)
+              |             |   (match=m.index,next=match+1)
+              |             |
+              |             |
+              |             |
+              |   +---------v----------+
+              |   |     replicate      |
+              +---+  max inflight = n  |
+                  +--------------------+
+```
+
+When the progress of a follower is in `probe` state, the leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message`s slowly, probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with a rejection might trigger the sending of the next `replication message`.
+
+When the progress of a follower is in `replicate` state, the leader sends `replication message`s, then optimistically increases `next` to the latest entry sent. This is an optimized state for fast replication of log entries to the follower.
+
+When the progress of a follower is in `snapshot` state, the leader stops sending any `replication message`.
+
+A newly elected leader sets the progress of all followers to `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message`s to the follower and probes its progress.
+
+A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, the leader starts to stream log entries to the follower quickly. The progress falls back to `probe` when the follower replies with a rejection `msgAppResp` or the link layer reports that the follower is unreachable. We aggressively reset `next` to `match`+1 since, if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in the `msgAppResp`. (We might end up sending some duplicate entries when we aggressively reset `next` too low; see open question.)
+
+A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits until the previously sent snapshot succeeds, fails, or is aborted. The progress goes back to `probe` after the sending result is applied.
+
+### Flow Control
+
+1. Limit the max size of each message sent. The max should be configurable.
+This lowers the cost in the probing state, since we limit the size per message; it also lowers the penalty when we aggressively decrease `next` to a value that is too low.
+
+2. Limit the number of in-flight messages to less than N when in `replicate` state. N should be configurable. Most implementations will have a send buffer on top of the actual network transport layer (so as not to block the raft node). We want to make sure raft does not overflow that buffer, which could cause messages to be dropped and trigger a bunch of unnecessary resending.
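Both knobs surface in the public `Config` shown in the usage examples above; a sketch of how they appear to line up with the two flow-control points, reusing the same illustrative values:

```go
  c := &raft.Config{
    ID:              0x01,
    ElectionTick:    10,
    HeartbeatTick:   1,
    Storage:         raft.NewMemoryStorage(),
    MaxSizePerMsg:   4096, // point 1: cap on bytes per replication message
    MaxInflightMsgs: 256,  // point 2: cap on in-flight messages while replicating
  }
```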
diff --git a/vendor/github.com/coreos/etcd/raft/doc.go b/vendor/github.com/coreos/etcd/raft/doc.go new file mode 100644 index 000000000000..b55c591ff5d7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/doc.go @@ -0,0 +1,300 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package raft sends and receives messages in the Protocol Buffer format +defined in the raftpb package. + +Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. +The state machine is kept in sync through the use of a replicated log. +For more details on Raft, see "In Search of an Understandable Consensus Algorithm" +(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout. + +A simple example application, _raftexample_, is also available to help illustrate +how to use this package in practice: +https://github.com/coreos/etcd/tree/master/contrib/raftexample + +Usage + +The primary object in raft is a Node. You either start a Node from scratch +using raft.StartNode or start a Node from some initial state using raft.RestartNode. + +To start a node from scratch: + + storage := raft.NewMemoryStorage() + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) + +To restart a node from previous state: + + storage := raft.NewMemoryStorage() + + // recover the in-memory storage from persistent + // snapshot, state and entries. + storage.ApplySnapshot(snapshot) + storage.SetHardState(state) + storage.Append(entries) + + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + + // restart raft without peer information. + // peer information is already included in the storage. + n := raft.RestartNode(c) + +Now that you are holding onto a Node you have a few responsibilities: + +First, you must read from the Node.Ready() channel and process the updates +it contains. These steps may be performed in parallel, except as noted in step +2. + +1. Write HardState, Entries, and Snapshot to persistent storage if they are +not empty. Note that when writing an Entry with Index i, any +previously-persisted entries with Index >= i must be discarded. + +2. Send all Messages to the nodes named in the To field. It is important that +no messages be sent until the latest HardState has been persisted to disk, +and all Entries written by any previous Ready batch (Messages may be sent while +entries from the same batch are being persisted). To reduce the I/O latency, an +optimization can be applied to make leader write to disk in parallel with its +followers (as explained at section 10.2.1 in Raft thesis). If any Message has type +MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be +large). 
+ +Note: Marshalling messages is not thread-safe; it is important that you +make sure that no new entries are persisted while marshalling. +The easiest way to achieve this is to serialise the messages directly inside +your main raft loop. + +3. Apply Snapshot (if any) and CommittedEntries to the state machine. +If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() +to apply it to the node. The configuration change may be cancelled at this point +by setting the NodeID field to zero before calling ApplyConfChange +(but ApplyConfChange must be called one way or the other, and the decision to cancel +must be based solely on the state machine and not external information such as +the observed health of the node). + +4. Call Node.Advance() to signal readiness for the next batch of updates. +This may be done at any time after step 1, although all updates must be processed +in the order they were returned by Ready. + +Second, all persisted log entries must be made available via an +implementation of the Storage interface. The provided MemoryStorage +type can be used for this (if you repopulate its state upon a +restart), or you can supply your own disk-backed implementation. + +Third, when you receive a message from another node, pass it to Node.Step: + + func recvRaftRPC(ctx context.Context, m raftpb.Message) { + n.Step(ctx, m) + } + +Finally, you need to call Node.Tick() at regular intervals (probably +via a time.Ticker). Raft has two important timeouts: heartbeat and the +election timeout. However, internally to the raft package time is +represented by an abstract "tick". + +The total state machine handling loop will look something like this: + + for { + select { + case <-s.Ticker: + n.Tick() + case rd := <-s.Node.Ready(): + saveToStorage(rd.State, rd.Entries, rd.Snapshot) + send(rd.Messages) + if !raft.IsEmptySnap(rd.Snapshot) { + processSnapshot(rd.Snapshot) + } + for _, entry := range rd.CommittedEntries { + process(entry) + if entry.Type == raftpb.EntryConfChange { + var cc raftpb.ConfChange + cc.Unmarshal(entry.Data) + s.Node.ApplyConfChange(cc) + } + } + s.Node.Advance() + case <-s.done: + return + } + } + +To propose changes to the state machine from your node take your application +data, serialize it into a byte slice and call: + + n.Propose(ctx, data) + +If the proposal is committed, data will appear in committed entries with type +raftpb.EntryNormal. There is no guarantee that a proposed command will be +committed; you may have to re-propose after a timeout. + +To add or remove node in a cluster, build ConfChange struct 'cc' and call: + + n.ProposeConfChange(ctx, cc) + +After config change is committed, some committed entry with type +raftpb.EntryConfChange will be returned. You must apply it to node through: + + var cc raftpb.ConfChange + cc.Unmarshal(data) + n.ApplyConfChange(cc) + +Note: An ID represents a unique node in a cluster for all time. A +given ID MUST be used only once even if the old node has been removed. +This means that for example IP addresses make poor node IDs since they +may be reused. Node IDs must be non-zero. + +Implementation notes + +This implementation is up to date with the final Raft thesis +(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our +implementation of the membership change protocol differs somewhat from +that described in chapter 4. 
The key invariant that membership changes +happen one node at a time is preserved, but in our implementation the +membership change takes effect when its entry is applied, not when it +is added to the log (so the entry is committed under the old +membership instead of the new). This is equivalent in terms of safety, +since the old and new configurations are guaranteed to overlap. + +To ensure that we do not attempt to commit two membership changes at +once by matching log positions (which would be unsafe since they +should have different quorum requirements), we simply disallow any +proposed membership change while any uncommitted change appears in +the leader's log. + +This approach introduces a problem when you try to remove a member +from a two-member cluster: If one of the members dies before the +other one receives the commit of the confchange entry, then the member +cannot be removed any more since the cluster cannot make progress. +For this reason it is highly recommended to use three or more nodes in +every cluster. + +MessageType + +Package raft sends and receives message in Protocol Buffer format (defined +in raftpb package). Each state (follower, candidate, leader) implements its +own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when +advancing with the given raftpb.Message. Each step is determined by its +raftpb.MessageType. Note that every step is checked by one common method +'Step' that safety-checks the terms of node and incoming message to prevent +stale log entries: + + 'MsgHup' is used for election. If a node is a follower or candidate, the + 'tick' function in 'raft' struct is set as 'tickElection'. If a follower or + candidate has not received any heartbeat before the election timeout, it + passes 'MsgHup' to its Step method and becomes (or remains) a candidate to + start a new election. + + 'MsgBeat' is an internal type that signals the leader to send a heartbeat of + the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in + the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to + send periodic 'MsgHeartbeat' messages to its followers. + + 'MsgProp' proposes to append data to its log entries. This is a special + type to redirect proposals to leader. Therefore, send method overwrites + raftpb.Message's term with its HardState's term to avoid attaching its + local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step' + method, the leader first calls the 'appendEntry' method to append entries + to its log, and then calls 'bcastAppend' method to send those entries to + its peers. When passed to candidate, 'MsgProp' is dropped. When passed to + follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send + method. It is stored with sender's ID and later forwarded to leader by + rafthttp package. + + 'MsgApp' contains log entries to replicate. A leader calls bcastAppend, + which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp' + type. When 'MsgApp' is passed to candidate's Step method, candidate reverts + back to follower, because it indicates that there is a valid leader sending + 'MsgApp' messages. Candidate and follower respond to this message in + 'MsgAppResp' type. + + 'MsgAppResp' is response to log replication request('MsgApp'). When + 'MsgApp' is passed to candidate or follower's Step method, it responds by + calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft + mailbox. + + 'MsgVote' requests votes for election. 
When a node is a follower or
+    candidate and 'MsgHup' is passed to its Step method, the node calls
+    the 'campaign' method to campaign to become the leader. Once
+    'campaign' is called, the node becomes a candidate and sends 'MsgVote'
+    to peers in the cluster to request votes. When passed to the leader's
+    or candidate's Step method and the message's Term is lower than the
+    leader's or candidate's, 'MsgVote' will be rejected ('MsgVoteResp' is
+    returned with Reject true). If the leader or candidate receives
+    'MsgVote' with a higher term, it will revert back to follower. When
+    'MsgVote' is passed to a follower, it votes for the sender only when
+    the sender's last term is greater than MsgVote's term, or the sender's
+    last term is equal to MsgVote's term but the sender's last committed
+    index is greater than or equal to the follower's.
+
+    'MsgVoteResp' contains responses from a voting request. When
+    'MsgVoteResp' is passed to the candidate, the candidate calculates how
+    many votes it has won. If it is more than the majority (quorum), it
+    becomes leader and calls 'bcastAppend'. If the candidate receives a
+    majority of denials, it reverts back to follower.
+
+    'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase
+    election protocol. When Config.PreVote is true, a pre-election is
+    carried out first (using the same rules as a regular election), and no
+    node increases its term number unless the pre-election indicates that
+    the campaigning node would win. This minimizes disruption when a
+    partitioned node rejoins the cluster.
+
+    'MsgSnap' requests to install a snapshot message. When a node has just
+    become the leader, or the leader receives a 'MsgProp' message, it
+    calls the 'bcastAppend' method, which then calls the 'sendAppend'
+    method for each follower. In 'sendAppend', if a leader fails to get
+    the term or entries, the leader requests a snapshot by sending a
+    'MsgSnap' type message.
+
+    'MsgSnapStatus' tells the result of a snapshot install message. When a
+    follower rejects 'MsgSnap', it indicates that the snapshot request
+    failed from network issues, which caused the network layer to fail to
+    deliver the snapshot to the follower. The leader then considers the
+    follower's progress as probe. When 'MsgSnap' is not rejected, it
+    indicates that the snapshot succeeded, and the leader sets the
+    follower's progress to probe and resumes its log replication.
+
+    'MsgHeartbeat' sends a heartbeat from the leader. When 'MsgHeartbeat'
+    is passed to a candidate and the message's term is higher than the
+    candidate's, the candidate reverts back to follower, updates its
+    committed index from the one in this heartbeat, and sends the message
+    to its mailbox. When 'MsgHeartbeat' is passed to a follower's Step
+    method and the message's term is higher than the follower's, the
+    follower updates its leaderID with the ID from the message.
+
+    'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When
+    'MsgHeartbeatResp' is passed to the leader's Step method, the leader
+    knows which follower responded. Only when the leader's last committed
+    index is greater than the follower's Match index does the leader run
+    the 'sendAppend' method.
+
+    'MsgUnreachable' tells that a request (message) wasn't delivered. When
+    'MsgUnreachable' is passed to the leader's Step method, the leader
+    discovers that the follower that sent this 'MsgUnreachable' is not
+    reachable, often indicating that 'MsgApp' is lost. When the follower's
+    progress state is replicate, the leader sets it back to probe.
+ +*/ +package raft diff --git a/vendor/github.com/coreos/etcd/raft/log.go b/vendor/github.com/coreos/etcd/raft/log.go new file mode 100644 index 000000000000..c3036d3c90dd --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/log.go @@ -0,0 +1,358 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "fmt" + "log" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +type raftLog struct { + // storage contains all stable entries since the last snapshot. + storage Storage + + // unstable contains all unstable entries and snapshot. + // they will be saved into storage. + unstable unstable + + // committed is the highest log position that is known to be in + // stable storage on a quorum of nodes. + committed uint64 + // applied is the highest log position that the application has + // been instructed to apply to its state machine. + // Invariant: applied <= committed + applied uint64 + + logger Logger +} + +// newLog returns log using the given storage. It recovers the log to the state +// that it just commits and applies the latest snapshot. +func newLog(storage Storage, logger Logger) *raftLog { + if storage == nil { + log.Panic("storage must not be nil") + } + log := &raftLog{ + storage: storage, + logger: logger, + } + firstIndex, err := storage.FirstIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + lastIndex, err := storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + log.unstable.offset = lastIndex + 1 + log.unstable.logger = logger + // Initialize our committed and applied pointers to the time of the last compaction. + log.committed = firstIndex - 1 + log.applied = firstIndex - 1 + + return log +} + +func (l *raftLog) String() string { + return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries)) +} + +// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise, +// it returns (last index of new entries, true). +func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) { + if l.matchTerm(index, logTerm) { + lastnewi = index + uint64(len(ents)) + ci := l.findConflict(ents) + switch { + case ci == 0: + case ci <= l.committed: + l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed) + default: + offset := index + 1 + l.append(ents[ci-offset:]...) + } + l.commitTo(min(committed, lastnewi)) + return lastnewi, true + } + return 0, false +} + +func (l *raftLog) append(ents ...pb.Entry) uint64 { + if len(ents) == 0 { + return l.lastIndex() + } + if after := ents[0].Index - 1; after < l.committed { + l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed) + } + l.unstable.truncateAndAppend(ents) + return l.lastIndex() +} + +// findConflict finds the index of the conflict. 
+// It returns the first pair of conflicting entries between the existing +// entries and the given entries, if there are any. +// If there is no conflicting entries, and the existing entries contains +// all the given entries, zero will be returned. +// If there is no conflicting entries, but the given entries contains new +// entries, the index of the first new entry will be returned. +// An entry is considered to be conflicting if it has the same index but +// a different term. +// The first entry MUST have an index equal to the argument 'from'. +// The index of the given entries MUST be continuously increasing. +func (l *raftLog) findConflict(ents []pb.Entry) uint64 { + for _, ne := range ents { + if !l.matchTerm(ne.Index, ne.Term) { + if ne.Index <= l.lastIndex() { + l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]", + ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term) + } + return ne.Index + } + } + return 0 +} + +func (l *raftLog) unstableEntries() []pb.Entry { + if len(l.unstable.entries) == 0 { + return nil + } + return l.unstable.entries +} + +// nextEnts returns all the available entries for execution. +// If applied is smaller than the index of snapshot, it returns all committed +// entries after the index of snapshot. +func (l *raftLog) nextEnts() (ents []pb.Entry) { + off := max(l.applied+1, l.firstIndex()) + if l.committed+1 > off { + ents, err := l.slice(off, l.committed+1, noLimit) + if err != nil { + l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err) + } + return ents + } + return nil +} + +// hasNextEnts returns if there is any available entries for execution. This +// is a fast check without heavy raftLog.slice() in raftLog.nextEnts(). +func (l *raftLog) hasNextEnts() bool { + off := max(l.applied+1, l.firstIndex()) + return l.committed+1 > off +} + +func (l *raftLog) snapshot() (pb.Snapshot, error) { + if l.unstable.snapshot != nil { + return *l.unstable.snapshot, nil + } + return l.storage.Snapshot() +} + +func (l *raftLog) firstIndex() uint64 { + if i, ok := l.unstable.maybeFirstIndex(); ok { + return i + } + index, err := l.storage.FirstIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + return index +} + +func (l *raftLog) lastIndex() uint64 { + if i, ok := l.unstable.maybeLastIndex(); ok { + return i + } + i, err := l.storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + return i +} + +func (l *raftLog) commitTo(tocommit uint64) { + // never decrease commit + if l.committed < tocommit { + if l.lastIndex() < tocommit { + l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. 
Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex()) + } + l.committed = tocommit + } +} + +func (l *raftLog) appliedTo(i uint64) { + if i == 0 { + return + } + if l.committed < i || i < l.applied { + l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed) + } + l.applied = i +} + +func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) } + +func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) } + +func (l *raftLog) lastTerm() uint64 { + t, err := l.term(l.lastIndex()) + if err != nil { + l.logger.Panicf("unexpected error when getting the last term (%v)", err) + } + return t +} + +func (l *raftLog) term(i uint64) (uint64, error) { + // the valid term range is [index of dummy entry, last index] + dummyIndex := l.firstIndex() - 1 + if i < dummyIndex || i > l.lastIndex() { + // TODO: return an error instead? + return 0, nil + } + + if t, ok := l.unstable.maybeTerm(i); ok { + return t, nil + } + + t, err := l.storage.Term(i) + if err == nil { + return t, nil + } + if err == ErrCompacted || err == ErrUnavailable { + return 0, err + } + panic(err) // TODO(bdarnell) +} + +func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) { + if i > l.lastIndex() { + return nil, nil + } + return l.slice(i, l.lastIndex()+1, maxsize) +} + +// allEntries returns all entries in the log. +func (l *raftLog) allEntries() []pb.Entry { + ents, err := l.entries(l.firstIndex(), noLimit) + if err == nil { + return ents + } + if err == ErrCompacted { // try again if there was a racing compaction + return l.allEntries() + } + // TODO (xiangli): handle error? + panic(err) +} + +// isUpToDate determines if the given (lastIndex,term) log is more up-to-date +// by comparing the index and term of the last entries in the existing logs. +// If the logs have last entries with different terms, then the log with the +// later term is more up-to-date. If the logs end with the same term, then +// whichever log has the larger lastIndex is more up-to-date. If the logs are +// the same, the given log is up-to-date. +func (l *raftLog) isUpToDate(lasti, term uint64) bool { + return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex()) +} + +func (l *raftLog) matchTerm(i, term uint64) bool { + t, err := l.term(i) + if err != nil { + return false + } + return t == term +} + +func (l *raftLog) maybeCommit(maxIndex, term uint64) bool { + if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term { + l.commitTo(maxIndex) + return true + } + return false +} + +func (l *raftLog) restore(s pb.Snapshot) { + l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term) + l.committed = s.Metadata.Index + l.unstable.restore(s) +} + +// slice returns a slice of log entries from lo through hi-1, inclusive. 
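+// Both bounds are validated: lo below firstIndex() surfaces as
+// ErrCompacted, while an out-of-range hi panics (see mustCheckOutOfBounds
+// below). For example, with firstIndex() == 5, slice(5, 8, noLimit)
+// returns the entries at indexes 5, 6, and 7.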
+func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) { + err := l.mustCheckOutOfBounds(lo, hi) + if err != nil { + return nil, err + } + if lo == hi { + return nil, nil + } + var ents []pb.Entry + if lo < l.unstable.offset { + storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize) + if err == ErrCompacted { + return nil, err + } else if err == ErrUnavailable { + l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset)) + } else if err != nil { + panic(err) // TODO(bdarnell) + } + + // check if ents has reached the size limitation + if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo { + return storedEnts, nil + } + + ents = storedEnts + } + if hi > l.unstable.offset { + unstable := l.unstable.slice(max(lo, l.unstable.offset), hi) + if len(ents) > 0 { + ents = append([]pb.Entry{}, ents...) + ents = append(ents, unstable...) + } else { + ents = unstable + } + } + return limitSize(ents, maxSize), nil +} + +// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries) +func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error { + if lo > hi { + l.logger.Panicf("invalid slice %d > %d", lo, hi) + } + fi := l.firstIndex() + if lo < fi { + return ErrCompacted + } + + length := l.lastIndex() + 1 - fi + if lo < fi || hi > fi+length { + l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex()) + } + return nil +} + +func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 { + if err == nil { + return t + } + if err == ErrCompacted { + return 0 + } + l.logger.Panicf("unexpected error (%v)", err) + return 0 +} diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go new file mode 100644 index 000000000000..263af9ce405e --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/log_unstable.go @@ -0,0 +1,159 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "github.com/coreos/etcd/raft/raftpb" + +// unstable.entries[i] has raft log position i+unstable.offset. +// Note that unstable.offset may be less than the highest log +// position in storage; this means that the next write to storage +// might need to truncate the log before persisting unstable.entries. +type unstable struct { + // the incoming unstable snapshot, if any. + snapshot *pb.Snapshot + // all entries that have not yet been written to storage. + entries []pb.Entry + offset uint64 + + logger Logger +} + +// maybeFirstIndex returns the index of the first possible entry in entries +// if it has a snapshot. +func (u *unstable) maybeFirstIndex() (uint64, bool) { + if u.snapshot != nil { + return u.snapshot.Metadata.Index + 1, true + } + return 0, false +} + +// maybeLastIndex returns the last index if it has at least one +// unstable entry or snapshot. 
+func (u *unstable) maybeLastIndex() (uint64, bool) { + if l := len(u.entries); l != 0 { + return u.offset + uint64(l) - 1, true + } + if u.snapshot != nil { + return u.snapshot.Metadata.Index, true + } + return 0, false +} + +// maybeTerm returns the term of the entry at index i, if there +// is any. +func (u *unstable) maybeTerm(i uint64) (uint64, bool) { + if i < u.offset { + if u.snapshot == nil { + return 0, false + } + if u.snapshot.Metadata.Index == i { + return u.snapshot.Metadata.Term, true + } + return 0, false + } + + last, ok := u.maybeLastIndex() + if !ok { + return 0, false + } + if i > last { + return 0, false + } + return u.entries[i-u.offset].Term, true +} + +func (u *unstable) stableTo(i, t uint64) { + gt, ok := u.maybeTerm(i) + if !ok { + return + } + // if i < offset, term is matched with the snapshot + // only update the unstable entries if term is matched with + // an unstable entry. + if gt == t && i >= u.offset { + u.entries = u.entries[i+1-u.offset:] + u.offset = i + 1 + u.shrinkEntriesArray() + } +} + +// shrinkEntriesArray discards the underlying array used by the entries slice +// if most of it isn't being used. This avoids holding references to a bunch of +// potentially large entries that aren't needed anymore. Simply clearing the +// entries wouldn't be safe because clients might still be using them. +func (u *unstable) shrinkEntriesArray() { + // We replace the array if we're using less than half of the space in + // it. This number is fairly arbitrary, chosen as an attempt to balance + // memory usage vs number of allocations. It could probably be improved + // with some focused tuning. + const lenMultiple = 2 + if len(u.entries) == 0 { + u.entries = nil + } else if len(u.entries)*lenMultiple < cap(u.entries) { + newEntries := make([]pb.Entry, len(u.entries)) + copy(newEntries, u.entries) + u.entries = newEntries + } +} + +func (u *unstable) stableSnapTo(i uint64) { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { + u.snapshot = nil + } +} + +func (u *unstable) restore(s pb.Snapshot) { + u.offset = s.Metadata.Index + 1 + u.entries = nil + u.snapshot = &s +} + +func (u *unstable) truncateAndAppend(ents []pb.Entry) { + after := ents[0].Index + switch { + case after == u.offset+uint64(len(u.entries)): + // after is the next index in the u.entries + // directly append + u.entries = append(u.entries, ents...) + case after <= u.offset: + u.logger.Infof("replace the unstable entries from index %d", after) + // The log is being truncated to before our current offset + // portion, so set the offset and replace the entries + u.offset = after + u.entries = ents + default: + // truncate to after and copy to u.entries + // then append + u.logger.Infof("truncate the unstable entries before index %d", after) + u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...) + u.entries = append(u.entries, ents...) 
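+		// The fresh copy via append([]pb.Entry{}, ...) above, rather than
+		// truncating u.entries in place, likely guards against clobbering
+		// entries that earlier callers may still hold references to;
+		// compare the aliasing note on shrinkEntriesArray.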
+	}
+}
+
+func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
+	u.mustCheckOutOfBounds(lo, hi)
+	return u.entries[lo-u.offset : hi-u.offset]
+}
+
+// u.offset <= lo <= hi <= u.offset+len(u.entries)
+func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
+	if lo > hi {
+		u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
+	}
+	upper := u.offset + uint64(len(u.entries))
+	if lo < u.offset || hi > upper {
+		u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/raft/logger.go b/vendor/github.com/coreos/etcd/raft/logger.go
new file mode 100644
index 000000000000..92e55b373e1d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/logger.go
@@ -0,0 +1,126 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+type Logger interface {
+	Debug(v ...interface{})
+	Debugf(format string, v ...interface{})
+
+	Error(v ...interface{})
+	Errorf(format string, v ...interface{})
+
+	Info(v ...interface{})
+	Infof(format string, v ...interface{})
+
+	Warning(v ...interface{})
+	Warningf(format string, v ...interface{})
+
+	Fatal(v ...interface{})
+	Fatalf(format string, v ...interface{})
+
+	Panic(v ...interface{})
+	Panicf(format string, v ...interface{})
+}
+
+func SetLogger(l Logger) { raftLogger = l }
+
+var (
+	defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
+	discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
+	raftLogger    = Logger(defaultLogger)
+)
+
+const (
+	calldepth = 2
+)
+
+// DefaultLogger is a default implementation of the Logger interface.
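+// As a usage sketch (only SetLogger and DefaultLogger, both defined in
+// this file, are assumed), an application could route raft's logging
+// through its own *log.Logger:
+//
+//	raft.SetLogger(&raft.DefaultLogger{Logger: log.New(os.Stderr, "raft ", log.LstdFlags)})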
+type DefaultLogger struct { + *log.Logger + debug bool +} + +func (l *DefaultLogger) EnableTimestamps() { + l.SetFlags(l.Flags() | log.Ldate | log.Ltime) +} + +func (l *DefaultLogger) EnableDebug() { + l.debug = true +} + +func (l *DefaultLogger) Debug(v ...interface{}) { + if l.debug { + l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) + } +} + +func (l *DefaultLogger) Debugf(format string, v ...interface{}) { + if l.debug { + l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) + } +} + +func (l *DefaultLogger) Info(v ...interface{}) { + l.Output(calldepth, header("INFO", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Infof(format string, v ...interface{}) { + l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Error(v ...interface{}) { + l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Errorf(format string, v ...interface{}) { + l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Warning(v ...interface{}) { + l.Output(calldepth, header("WARN", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Warningf(format string, v ...interface{}) { + l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Fatal(v ...interface{}) { + l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { + l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Panic(v ...interface{}) { + l.Logger.Panic(v) +} + +func (l *DefaultLogger) Panicf(format string, v ...interface{}) { + l.Logger.Panicf(format, v...) +} + +func header(lvl, msg string) string { + return fmt.Sprintf("%s: %s", lvl, msg) +} diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go new file mode 100644 index 000000000000..5da1c1193b27 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/node.go @@ -0,0 +1,537 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + + pb "github.com/coreos/etcd/raft/raftpb" + "golang.org/x/net/context" +) + +type SnapshotStatus int + +const ( + SnapshotFinish SnapshotStatus = 1 + SnapshotFailure SnapshotStatus = 2 +) + +var ( + emptyState = pb.HardState{} + + // ErrStopped is returned by methods on Nodes that have been stopped. + ErrStopped = errors.New("raft: stopped") +) + +// SoftState provides state that is useful for logging and debugging. +// The state is volatile and does not need to be persisted to the WAL. +type SoftState struct { + Lead uint64 // must use atomic operations to access; keep 64-bit aligned. 
+ RaftState StateType +} + +func (a *SoftState) equal(b *SoftState) bool { + return a.Lead == b.Lead && a.RaftState == b.RaftState +} + +// Ready encapsulates the entries and messages that are ready to read, +// be saved to stable storage, committed or sent to other peers. +// All fields in Ready are read-only. +type Ready struct { + // The current volatile state of a Node. + // SoftState will be nil if there is no update. + // It is not required to consume or store SoftState. + *SoftState + + // The current state of a Node to be saved to stable storage BEFORE + // Messages are sent. + // HardState will be equal to empty state if there is no update. + pb.HardState + + // ReadStates can be used for node to serve linearizable read requests locally + // when its applied index is greater than the index in ReadState. + // Note that the readState will be returned when raft receives msgReadIndex. + // The returned is only valid for the request that requested to read. + ReadStates []ReadState + + // Entries specifies entries to be saved to stable storage BEFORE + // Messages are sent. + Entries []pb.Entry + + // Snapshot specifies the snapshot to be saved to stable storage. + Snapshot pb.Snapshot + + // CommittedEntries specifies entries to be committed to a + // store/state-machine. These have previously been committed to stable + // store. + CommittedEntries []pb.Entry + + // Messages specifies outbound messages to be sent AFTER Entries are + // committed to stable storage. + // If it contains a MsgSnap message, the application MUST report back to raft + // when the snapshot has been received or has failed by calling ReportSnapshot. + Messages []pb.Message + + // MustSync indicates whether the HardState and Entries must be synchronously + // written to disk or if an asynchronous write is permissible. + MustSync bool +} + +func isHardStateEqual(a, b pb.HardState) bool { + return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit +} + +// IsEmptyHardState returns true if the given HardState is empty. +func IsEmptyHardState(st pb.HardState) bool { + return isHardStateEqual(st, emptyState) +} + +// IsEmptySnap returns true if the given Snapshot is empty. +func IsEmptySnap(sp pb.Snapshot) bool { + return sp.Metadata.Index == 0 +} + +func (rd Ready) containsUpdates() bool { + return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) || + !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 || + len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0 +} + +// Node represents a node in a raft cluster. +type Node interface { + // Tick increments the internal logical clock for the Node by a single tick. Election + // timeouts and heartbeat timeouts are in units of ticks. + Tick() + // Campaign causes the Node to transition to candidate state and start campaigning to become leader. + Campaign(ctx context.Context) error + // Propose proposes that data be appended to the log. + Propose(ctx context.Context, data []byte) error + // ProposeConfChange proposes config change. + // At most one ConfChange can be in the process of going through consensus. + // Application needs to call ApplyConfChange when applying EntryConfChange type entry. + ProposeConfChange(ctx context.Context, cc pb.ConfChange) error + // Step advances the state machine using the given message. ctx.Err() will be returned, if any. + Step(ctx context.Context, msg pb.Message) error + + // Ready returns a channel that returns the current point-in-time state. 
+ // Users of the Node must call Advance after retrieving the state returned by Ready. + // + // NOTE: No committed entries from the next Ready may be applied until all committed entries + // and snapshots from the previous one have finished. + Ready() <-chan Ready + + // Advance notifies the Node that the application has saved progress up to the last Ready. + // It prepares the node to return the next available Ready. + // + // The application should generally call Advance after it applies the entries in last Ready. + // + // However, as an optimization, the application may call Advance while it is applying the + // commands. For example. when the last Ready contains a snapshot, the application might take + // a long time to apply the snapshot data. To continue receiving Ready without blocking raft + // progress, it can call Advance before finishing applying the last ready. + Advance() + // ApplyConfChange applies config change to the local node. + // Returns an opaque ConfState protobuf which must be recorded + // in snapshots. Will never return nil; it returns a pointer only + // to match MemoryStorage.Compact. + ApplyConfChange(cc pb.ConfChange) *pb.ConfState + + // TransferLeadership attempts to transfer leadership to the given transferee. + TransferLeadership(ctx context.Context, lead, transferee uint64) + + // ReadIndex request a read state. The read state will be set in the ready. + // Read state has a read index. Once the application advances further than the read + // index, any linearizable read requests issued before the read request can be + // processed safely. The read state will have the same rctx attached. + ReadIndex(ctx context.Context, rctx []byte) error + + // Status returns the current status of the raft state machine. + Status() Status + // ReportUnreachable reports the given node is not reachable for the last send. + ReportUnreachable(id uint64) + // ReportSnapshot reports the status of the sent snapshot. + ReportSnapshot(id uint64, status SnapshotStatus) + // Stop performs any necessary termination of the Node. + Stop() +} + +type Peer struct { + ID uint64 + Context []byte +} + +// StartNode returns a new Node given configuration and a list of raft peers. +// It appends a ConfChangeAddNode entry for each given peer to the initial log. +func StartNode(c *Config, peers []Peer) Node { + r := newRaft(c) + // become the follower at term 1 and apply initial configuration + // entries of term 1 + r.becomeFollower(1, None) + for _, peer := range peers { + cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} + d, err := cc.Marshal() + if err != nil { + panic("unexpected marshal error") + } + e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d} + r.raftLog.append(e) + } + // Mark these initial entries as committed. + // TODO(bdarnell): These entries are still unstable; do we need to preserve + // the invariant that committed < unstable? + r.raftLog.committed = r.raftLog.lastIndex() + // Now apply them, mainly so that the application can call Campaign + // immediately after StartNode in tests. Note that these nodes will + // be added to raft twice: here and when the application's Ready + // loop calls ApplyConfChange. The calls to addNode must come after + // all calls to raftLog.append so progress.next is set after these + // bootstrapping entries (it is an error if we try to append these + // entries since they have already been committed). 
+ // We do not set raftLog.applied so the application will be able + // to observe all conf changes via Ready.CommittedEntries. + for _, peer := range peers { + r.addNode(peer.ID) + } + + n := newNode() + n.logger = c.Logger + go n.run(r) + return &n +} + +// RestartNode is similar to StartNode but does not take a list of peers. +// The current membership of the cluster will be restored from the Storage. +// If the caller has an existing state machine, pass in the last log index that +// has been applied to it; otherwise use zero. +func RestartNode(c *Config) Node { + r := newRaft(c) + + n := newNode() + n.logger = c.Logger + go n.run(r) + return &n +} + +// node is the canonical implementation of the Node interface +type node struct { + propc chan pb.Message + recvc chan pb.Message + confc chan pb.ConfChange + confstatec chan pb.ConfState + readyc chan Ready + advancec chan struct{} + tickc chan struct{} + done chan struct{} + stop chan struct{} + status chan chan Status + + logger Logger +} + +func newNode() node { + return node{ + propc: make(chan pb.Message), + recvc: make(chan pb.Message), + confc: make(chan pb.ConfChange), + confstatec: make(chan pb.ConfState), + readyc: make(chan Ready), + advancec: make(chan struct{}), + // make tickc a buffered chan, so raft node can buffer some ticks when the node + // is busy processing raft messages. Raft node will resume process buffered + // ticks when it becomes idle. + tickc: make(chan struct{}, 128), + done: make(chan struct{}), + stop: make(chan struct{}), + status: make(chan chan Status), + } +} + +func (n *node) Stop() { + select { + case n.stop <- struct{}{}: + // Not already stopped, so trigger it + case <-n.done: + // Node has already been stopped - no need to do anything + return + } + // Block until the stop has been acknowledged by run() + <-n.done +} + +func (n *node) run(r *raft) { + var propc chan pb.Message + var readyc chan Ready + var advancec chan struct{} + var prevLastUnstablei, prevLastUnstablet uint64 + var havePrevLastUnstablei bool + var prevSnapi uint64 + var rd Ready + + lead := None + prevSoftSt := r.softState() + prevHardSt := emptyState + + for { + if advancec != nil { + readyc = nil + } else { + rd = newReady(r, prevSoftSt, prevHardSt) + if rd.containsUpdates() { + readyc = n.readyc + } else { + readyc = nil + } + } + + if lead != r.lead { + if r.hasLeader() { + if lead == None { + r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term) + } else { + r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term) + } + propc = n.propc + } else { + r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term) + propc = nil + } + lead = r.lead + } + + select { + // TODO: maybe buffer the config propose if there exists one (the way + // described in raft dissertation) + // Currently it is dropped in Step silently. + case m := <-propc: + m.From = r.id + r.Step(m) + case m := <-n.recvc: + // filter out response message from unknown From. 
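+			// Dropping responses from senders that are absent from r.prs is
+			// presumably a guard against stale replies from removed peers;
+			// requests (votes, appends, heartbeats) are still stepped because
+			// this node's view of the membership may simply lag the sender's.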
+ if _, ok := r.prs[m.From]; ok || !IsResponseMsg(m.Type) { + r.Step(m) // raft never returns an error + } + case cc := <-n.confc: + if cc.NodeID == None { + r.resetPendingConf() + select { + case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: + case <-n.done: + } + break + } + switch cc.Type { + case pb.ConfChangeAddNode: + r.addNode(cc.NodeID) + case pb.ConfChangeRemoveNode: + // block incoming proposal when local node is + // removed + if cc.NodeID == r.id { + propc = nil + } + r.removeNode(cc.NodeID) + case pb.ConfChangeUpdateNode: + r.resetPendingConf() + default: + panic("unexpected conf type") + } + select { + case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: + case <-n.done: + } + case <-n.tickc: + r.tick() + case readyc <- rd: + if rd.SoftState != nil { + prevSoftSt = rd.SoftState + } + if len(rd.Entries) > 0 { + prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index + prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term + havePrevLastUnstablei = true + } + if !IsEmptyHardState(rd.HardState) { + prevHardSt = rd.HardState + } + if !IsEmptySnap(rd.Snapshot) { + prevSnapi = rd.Snapshot.Metadata.Index + } + + r.msgs = nil + r.readStates = nil + advancec = n.advancec + case <-advancec: + if prevHardSt.Commit != 0 { + r.raftLog.appliedTo(prevHardSt.Commit) + } + if havePrevLastUnstablei { + r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet) + havePrevLastUnstablei = false + } + r.raftLog.stableSnapTo(prevSnapi) + advancec = nil + case c := <-n.status: + c <- getStatus(r) + case <-n.stop: + close(n.done) + return + } + } +} + +// Tick increments the internal logical clock for this Node. Election timeouts +// and heartbeat timeouts are in units of ticks. +func (n *node) Tick() { + select { + case n.tickc <- struct{}{}: + case <-n.done: + default: + n.logger.Warningf("A tick missed to fire. Node blocks too long!") + } +} + +func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) } + +func (n *node) Propose(ctx context.Context, data []byte) error { + return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}) +} + +func (n *node) Step(ctx context.Context, m pb.Message) error { + // ignore unexpected local messages receiving over network + if IsLocalMsg(m.Type) { + // TODO: return an error? + return nil + } + return n.step(ctx, m) +} + +func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error { + data, err := cc.Marshal() + if err != nil { + return err + } + return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}}) +} + +// Step advances the state machine using msgs. The ctx.Err() will be returned, +// if any. 
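+// Proposals (MsgProp) are routed to propc, which the run loop only
+// receives from while a leader is known, so a caller of Propose blocks
+// until a leader emerges, ctx is done, or the node stops; all other
+// messages go to recvc.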
+func (n *node) step(ctx context.Context, m pb.Message) error { + ch := n.recvc + if m.Type == pb.MsgProp { + ch = n.propc + } + + select { + case ch <- m: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } +} + +func (n *node) Ready() <-chan Ready { return n.readyc } + +func (n *node) Advance() { + select { + case n.advancec <- struct{}{}: + case <-n.done: + } +} + +func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { + var cs pb.ConfState + select { + case n.confc <- cc: + case <-n.done: + } + select { + case cs = <-n.confstatec: + case <-n.done: + } + return &cs +} + +func (n *node) Status() Status { + c := make(chan Status) + select { + case n.status <- c: + return <-c + case <-n.done: + return Status{} + } +} + +func (n *node) ReportUnreachable(id uint64) { + select { + case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}: + case <-n.done: + } +} + +func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + select { + case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}: + case <-n.done: + } +} + +func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) { + select { + // manually set 'from' and 'to', so that leader can voluntarily transfers its leadership + case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}: + case <-n.done: + case <-ctx.Done(): + } +} + +func (n *node) ReadIndex(ctx context.Context, rctx []byte) error { + return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) +} + +func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { + rd := Ready{ + Entries: r.raftLog.unstableEntries(), + CommittedEntries: r.raftLog.nextEnts(), + Messages: r.msgs, + } + if softSt := r.softState(); !softSt.equal(prevSoftSt) { + rd.SoftState = softSt + } + if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) { + rd.HardState = hardSt + } + if r.raftLog.unstable.snapshot != nil { + rd.Snapshot = *r.raftLog.unstable.snapshot + } + if len(r.readStates) != 0 { + rd.ReadStates = r.readStates + } + rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries)) + return rd +} + +// MustSync returns true if the hard state and count of Raft entries indicate +// that a synchronous write to persistent storage is required. +func MustSync(st, prevst pb.HardState, entsnum int) bool { + // Persistent state on all servers: + // (Updated on stable storage before responding to RPCs) + // currentTerm + // votedFor + // log entries[] + return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term +} diff --git a/vendor/github.com/coreos/etcd/raft/progress.go b/vendor/github.com/coreos/etcd/raft/progress.go new file mode 100644 index 000000000000..77c7b52efe38 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/progress.go @@ -0,0 +1,279 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import "fmt" + +const ( + ProgressStateProbe ProgressStateType = iota + ProgressStateReplicate + ProgressStateSnapshot +) + +type ProgressStateType uint64 + +var prstmap = [...]string{ + "ProgressStateProbe", + "ProgressStateReplicate", + "ProgressStateSnapshot", +} + +func (st ProgressStateType) String() string { return prstmap[uint64(st)] } + +// Progress represents a follower’s progress in the view of the leader. Leader maintains +// progresses of all followers, and sends entries to the follower based on its progress. +type Progress struct { + Match, Next uint64 + // State defines how the leader should interact with the follower. + // + // When in ProgressStateProbe, leader sends at most one replication message + // per heartbeat interval. It also probes actual progress of the follower. + // + // When in ProgressStateReplicate, leader optimistically increases next + // to the latest entry sent after sending replication message. This is + // an optimized state for fast replicating log entries to the follower. + // + // When in ProgressStateSnapshot, leader should have sent out snapshot + // before and stops sending any replication message. + State ProgressStateType + // Paused is used in ProgressStateProbe. + // When Paused is true, raft should pause sending replication message to this peer. + Paused bool + // PendingSnapshot is used in ProgressStateSnapshot. + // If there is a pending snapshot, the pendingSnapshot will be set to the + // index of the snapshot. If pendingSnapshot is set, the replication process of + // this Progress will be paused. raft will not resend snapshot until the pending one + // is reported to be failed. + PendingSnapshot uint64 + + // RecentActive is true if the progress is recently active. Receiving any messages + // from the corresponding follower indicates the progress is active. + // RecentActive can be reset to false after an election timeout. + RecentActive bool + + // inflights is a sliding window for the inflight messages. + // Each inflight message contains one or more log entries. + // The max number of entries per message is defined in raft config as MaxSizePerMsg. + // Thus inflight effectively limits both the number of inflight messages + // and the bandwidth each Progress can use. + // When inflights is full, no more message should be sent. + // When a leader sends out a message, the index of the last + // entry should be added to inflights. The index MUST be added + // into inflights in order. + // When a leader receives a reply, the previous inflights should + // be freed by calling inflights.freeTo with the index of the last + // received entry. + ins *inflights +} + +func (pr *Progress) resetState(state ProgressStateType) { + pr.Paused = false + pr.PendingSnapshot = 0 + pr.State = state + pr.ins.reset() +} + +func (pr *Progress) becomeProbe() { + // If the original state is ProgressStateSnapshot, progress knows that + // the pending snapshot has been sent to this peer successfully, then + // probes from pendingSnapshot + 1. 
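+	// For example, a follower with Match == 5 and PendingSnapshot == 10
+	// resumes probing at Next == 11, i.e. max(5+1, 10+1).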
+ if pr.State == ProgressStateSnapshot { + pendingSnapshot := pr.PendingSnapshot + pr.resetState(ProgressStateProbe) + pr.Next = max(pr.Match+1, pendingSnapshot+1) + } else { + pr.resetState(ProgressStateProbe) + pr.Next = pr.Match + 1 + } +} + +func (pr *Progress) becomeReplicate() { + pr.resetState(ProgressStateReplicate) + pr.Next = pr.Match + 1 +} + +func (pr *Progress) becomeSnapshot(snapshoti uint64) { + pr.resetState(ProgressStateSnapshot) + pr.PendingSnapshot = snapshoti +} + +// maybeUpdate returns false if the given n index comes from an outdated message. +// Otherwise it updates the progress and returns true. +func (pr *Progress) maybeUpdate(n uint64) bool { + var updated bool + if pr.Match < n { + pr.Match = n + updated = true + pr.resume() + } + if pr.Next < n+1 { + pr.Next = n + 1 + } + return updated +} + +func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 } + +// maybeDecrTo returns false if the given to index comes from an out of order message. +// Otherwise it decreases the progress next index to min(rejected, last) and returns true. +func (pr *Progress) maybeDecrTo(rejected, last uint64) bool { + if pr.State == ProgressStateReplicate { + // the rejection must be stale if the progress has matched and "rejected" + // is smaller than "match". + if rejected <= pr.Match { + return false + } + // directly decrease next to match + 1 + pr.Next = pr.Match + 1 + return true + } + + // the rejection must be stale if "rejected" does not match next - 1 + if pr.Next-1 != rejected { + return false + } + + if pr.Next = min(rejected, last+1); pr.Next < 1 { + pr.Next = 1 + } + pr.resume() + return true +} + +func (pr *Progress) pause() { pr.Paused = true } +func (pr *Progress) resume() { pr.Paused = false } + +// IsPaused returns whether sending log entries to this node has been +// paused. A node may be paused because it has rejected recent +// MsgApps, is currently waiting for a snapshot, or has reached the +// MaxInflightMsgs limit. +func (pr *Progress) IsPaused() bool { + switch pr.State { + case ProgressStateProbe: + return pr.Paused + case ProgressStateReplicate: + return pr.ins.full() + case ProgressStateSnapshot: + return true + default: + panic("unexpected state") + } +} + +func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 } + +// needSnapshotAbort returns true if snapshot progress's Match +// is equal or higher than the pendingSnapshot. +func (pr *Progress) needSnapshotAbort() bool { + return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot +} + +func (pr *Progress) String() string { + return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot) +} + +type inflights struct { + // the starting index in the buffer + start int + // number of inflights in the buffer + count int + + // the size of the buffer + size int + + // buffer contains the index of the last entry + // inside one message. + buffer []uint64 +} + +func newInflights(size int) *inflights { + return &inflights{ + size: size, + } +} + +// add adds an inflight into inflights +func (in *inflights) add(inflight uint64) { + if in.full() { + panic("cannot add into a full inflights") + } + next := in.start + in.count + size := in.size + if next >= size { + next -= size + } + if next >= len(in.buffer) { + in.growBuf() + } + in.buffer[next] = inflight + in.count++ +} + +// grow the inflight buffer by doubling up to inflights.size. 
We grow on demand +// instead of preallocating to inflights.size to handle systems which have +// thousands of Raft groups per process. +func (in *inflights) growBuf() { + newSize := len(in.buffer) * 2 + if newSize == 0 { + newSize = 1 + } else if newSize > in.size { + newSize = in.size + } + newBuffer := make([]uint64, newSize) + copy(newBuffer, in.buffer) + in.buffer = newBuffer +} + +// freeTo frees the inflights smaller or equal to the given `to` flight. +func (in *inflights) freeTo(to uint64) { + if in.count == 0 || to < in.buffer[in.start] { + // out of the left side of the window + return + } + + i, idx := 0, in.start + for i = 0; i < in.count; i++ { + if to < in.buffer[idx] { // found the first large inflight + break + } + + // increase index and maybe rotate + size := in.size + if idx++; idx >= size { + idx -= size + } + } + // free i inflights and set new start index + in.count -= i + in.start = idx + if in.count == 0 { + // inflights is empty, reset the start index so that we don't grow the + // buffer unnecessarily. + in.start = 0 + } +} + +func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) } + +// full returns true if the inflights is full. +func (in *inflights) full() bool { + return in.count == in.size +} + +// resets frees all inflights. +func (in *inflights) reset() { + in.count = 0 + in.start = 0 +} diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go new file mode 100644 index 000000000000..29f203982032 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/raft.go @@ -0,0 +1,1257 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "bytes" + "errors" + "fmt" + "math" + "math/rand" + "sort" + "strings" + "sync" + "time" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +// None is a placeholder node ID used when there is no leader. +const None uint64 = 0 +const noLimit = math.MaxUint64 + +// Possible values for StateType. +const ( + StateFollower StateType = iota + StateCandidate + StateLeader + StatePreCandidate + numStates +) + +type ReadOnlyOption int + +const ( + // ReadOnlySafe guarantees the linearizability of the read only request by + // communicating with the quorum. It is the default and suggested option. + ReadOnlySafe ReadOnlyOption = iota + // ReadOnlyLeaseBased ensures linearizability of the read only request by + // relying on the leader lease. It can be affected by clock drift. + // If the clock drift is unbounded, leader might keep the lease longer than it + // should (clock can move backward/pause without any bound). ReadIndex is not safe + // in that case. + ReadOnlyLeaseBased +) + +// Possible values for CampaignType +const ( + // campaignPreElection represents the first phase of a normal election when + // Config.PreVote is true. 
+ campaignPreElection CampaignType = "CampaignPreElection" + // campaignElection represents a normal (time-based) election (the second phase + // of the election when Config.PreVote is true). + campaignElection CampaignType = "CampaignElection" + // campaignTransfer represents the type of leader transfer + campaignTransfer CampaignType = "CampaignTransfer" +) + +// lockedRand is a small wrapper around rand.Rand to provide +// synchronization. Only the methods needed by the code are exposed +// (e.g. Intn). +type lockedRand struct { + mu sync.Mutex + rand *rand.Rand +} + +func (r *lockedRand) Intn(n int) int { + r.mu.Lock() + v := r.rand.Intn(n) + r.mu.Unlock() + return v +} + +var globalRand = &lockedRand{ + rand: rand.New(rand.NewSource(time.Now().UnixNano())), +} + +// CampaignType represents the type of campaigning +// the reason we use the type of string instead of uint64 +// is because it's simpler to compare and fill in raft entries +type CampaignType string + +// StateType represents the role of a node in a cluster. +type StateType uint64 + +var stmap = [...]string{ + "StateFollower", + "StateCandidate", + "StateLeader", + "StatePreCandidate", +} + +func (st StateType) String() string { + return stmap[uint64(st)] +} + +// Config contains the parameters to start a raft. +type Config struct { + // ID is the identity of the local raft. ID cannot be 0. + ID uint64 + + // peers contains the IDs of all nodes (including self) in the raft cluster. It + // should only be set when starting a new raft cluster. Restarting raft from + // previous configuration will panic if peers is set. peer is private and only + // used for testing right now. + peers []uint64 + + // ElectionTick is the number of Node.Tick invocations that must pass between + // elections. That is, if a follower does not receive any message from the + // leader of current term before ElectionTick has elapsed, it will become + // candidate and start an election. ElectionTick must be greater than + // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid + // unnecessary leader switching. + ElectionTick int + // HeartbeatTick is the number of Node.Tick invocations that must pass between + // heartbeats. That is, a leader sends heartbeat messages to maintain its + // leadership every HeartbeatTick ticks. + HeartbeatTick int + + // Storage is the storage for raft. raft generates entries and states to be + // stored in storage. raft reads the persisted entries and states out of + // Storage when it needs. raft reads out the previous state and configuration + // out of storage when restarting. + Storage Storage + // Applied is the last applied index. It should only be set when restarting + // raft. raft will not return entries to the application smaller or equal to + // Applied. If Applied is unset when restarting, raft might return previous + // applied entries. This is a very application dependent configuration. + Applied uint64 + + // MaxSizePerMsg limits the max size of each append message. Smaller value + // lowers the raft recovery cost(initial probing and message lost during normal + // operation). On the other side, it might affect the throughput during normal + // replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per + // message. + MaxSizePerMsg uint64 + // MaxInflightMsgs limits the max number of in-flight append messages during + // optimistic replication phase. The application transportation layer usually + // has its own sending buffer over TCP/UDP. 
Setting MaxInflightMsgs to avoid + // overflowing that sending buffer. TODO (xiangli): feedback to application to + // limit the proposal rate? + MaxInflightMsgs int + + // CheckQuorum specifies if the leader should check quorum activity. Leader + // steps down when quorum is not active for an electionTimeout. + CheckQuorum bool + + // PreVote enables the Pre-Vote algorithm described in raft thesis section + // 9.6. This prevents disruption when a node that has been partitioned away + // rejoins the cluster. + PreVote bool + + // ReadOnlyOption specifies how the read only request is processed. + // + // ReadOnlySafe guarantees the linearizability of the read only request by + // communicating with the quorum. It is the default and suggested option. + // + // ReadOnlyLeaseBased ensures linearizability of the read only request by + // relying on the leader lease. It can be affected by clock drift. + // If the clock drift is unbounded, leader might keep the lease longer than it + // should (clock can move backward/pause without any bound). ReadIndex is not safe + // in that case. + ReadOnlyOption ReadOnlyOption + + // Logger is the logger used for raft log. For multinode which can host + // multiple raft group, each raft group can have its own logger + Logger Logger +} + +func (c *Config) validate() error { + if c.ID == None { + return errors.New("cannot use none as id") + } + + if c.HeartbeatTick <= 0 { + return errors.New("heartbeat tick must be greater than 0") + } + + if c.ElectionTick <= c.HeartbeatTick { + return errors.New("election tick must be greater than heartbeat tick") + } + + if c.Storage == nil { + return errors.New("storage cannot be nil") + } + + if c.MaxInflightMsgs <= 0 { + return errors.New("max inflight messages must be greater than 0") + } + + if c.Logger == nil { + c.Logger = raftLogger + } + + return nil +} + +type raft struct { + id uint64 + + Term uint64 + Vote uint64 + + readStates []ReadState + + // the log + raftLog *raftLog + + maxInflight int + maxMsgSize uint64 + prs map[uint64]*Progress + + state StateType + + votes map[uint64]bool + + msgs []pb.Message + + // the leader id + lead uint64 + // leadTransferee is id of the leader transfer target when its value is not zero. + // Follow the procedure defined in raft thesis 3.10. + leadTransferee uint64 + // New configuration is ignored if there exists unapplied configuration. + pendingConf bool + + readOnly *readOnly + + // number of ticks since it reached last electionTimeout when it is leader + // or candidate. + // number of ticks since it reached last electionTimeout or received a + // valid message from current leader when it is a follower. + electionElapsed int + + // number of ticks since it reached last heartbeatTimeout. + // only leader keeps heartbeatElapsed. + heartbeatElapsed int + + checkQuorum bool + preVote bool + + heartbeatTimeout int + electionTimeout int + // randomizedElectionTimeout is a random number between + // [electiontimeout, 2 * electiontimeout - 1]. It gets reset + // when raft changes its state to follower or candidate. 
+ randomizedElectionTimeout int + + tick func() + step stepFunc + + logger Logger +} + +func newRaft(c *Config) *raft { + if err := c.validate(); err != nil { + panic(err.Error()) + } + raftlog := newLog(c.Storage, c.Logger) + hs, cs, err := c.Storage.InitialState() + if err != nil { + panic(err) // TODO(bdarnell) + } + peers := c.peers + if len(cs.Nodes) > 0 { + if len(peers) > 0 { + // TODO(bdarnell): the peers argument is always nil except in + // tests; the argument should be removed and these tests should be + // updated to specify their nodes through a snapshot. + panic("cannot specify both newRaft(peers) and ConfState.Nodes)") + } + peers = cs.Nodes + } + r := &raft{ + id: c.ID, + lead: None, + raftLog: raftlog, + maxMsgSize: c.MaxSizePerMsg, + maxInflight: c.MaxInflightMsgs, + prs: make(map[uint64]*Progress), + electionTimeout: c.ElectionTick, + heartbeatTimeout: c.HeartbeatTick, + logger: c.Logger, + checkQuorum: c.CheckQuorum, + preVote: c.PreVote, + readOnly: newReadOnly(c.ReadOnlyOption), + } + for _, p := range peers { + r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)} + } + if !isHardStateEqual(hs, emptyState) { + r.loadState(hs) + } + if c.Applied > 0 { + raftlog.appliedTo(c.Applied) + } + r.becomeFollower(r.Term, None) + + var nodesStrs []string + for _, n := range r.nodes() { + nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) + } + + r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]", + r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm()) + return r +} + +func (r *raft) hasLeader() bool { return r.lead != None } + +func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} } + +func (r *raft) hardState() pb.HardState { + return pb.HardState{ + Term: r.Term, + Vote: r.Vote, + Commit: r.raftLog.committed, + } +} + +func (r *raft) quorum() int { return len(r.prs)/2 + 1 } + +func (r *raft) nodes() []uint64 { + nodes := make([]uint64, 0, len(r.prs)) + for id := range r.prs { + nodes = append(nodes, id) + } + sort.Sort(uint64Slice(nodes)) + return nodes +} + +// send persists state to stable storage and then sends to its mailbox. +func (r *raft) send(m pb.Message) { + m.From = r.id + if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote { + if m.Term == 0 { + // PreVote RPCs are sent at a term other than our actual term, so the code + // that sends these messages is responsible for setting the term. + panic(fmt.Sprintf("term should be set when sending %s", m.Type)) + } + } else { + if m.Term != 0 { + panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term)) + } + // do not attach term to MsgProp, MsgReadIndex + // proposals are a way to forward to the leader and + // should be treated as local message. + // MsgReadIndex is also forwarded to leader. + if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex { + m.Term = r.Term + } + } + r.msgs = append(r.msgs, m) +} + +// sendAppend sends RPC, with entries to the given peer. 
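+// If the term or entries at pr.Next-1 have already been compacted away,
+// sendAppend falls back to shipping the current snapshot in a MsgSnap and
+// moves the peer's Progress into ProgressStateSnapshot.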
+func (r *raft) sendAppend(to uint64) { + pr := r.prs[to] + if pr.IsPaused() { + return + } + m := pb.Message{} + m.To = to + + term, errt := r.raftLog.term(pr.Next - 1) + ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) + + if errt != nil || erre != nil { // send snapshot if we failed to get term or entries + if !pr.RecentActive { + r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to) + return + } + + m.Type = pb.MsgSnap + snapshot, err := r.raftLog.snapshot() + if err != nil { + if err == ErrSnapshotTemporarilyUnavailable { + r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to) + return + } + panic(err) // TODO(bdarnell) + } + if IsEmptySnap(snapshot) { + panic("need non-empty snapshot") + } + m.Snapshot = snapshot + sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term + r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]", + r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) + pr.becomeSnapshot(sindex) + r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr) + } else { + m.Type = pb.MsgApp + m.Index = pr.Next - 1 + m.LogTerm = term + m.Entries = ents + m.Commit = r.raftLog.committed + if n := len(m.Entries); n != 0 { + switch pr.State { + // optimistically increase the next when in ProgressStateReplicate + case ProgressStateReplicate: + last := m.Entries[n-1].Index + pr.optimisticUpdate(last) + pr.ins.add(last) + case ProgressStateProbe: + pr.pause() + default: + r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State) + } + } + } + r.send(m) +} + +// sendHeartbeat sends an empty MsgApp +func (r *raft) sendHeartbeat(to uint64, ctx []byte) { + // Attach the commit as min(to.matched, r.committed). + // When the leader sends out heartbeat message, + // the receiver(follower) might not be matched with the leader + // or it might not have all the committed entries. + // The leader MUST NOT forward the follower's commit to + // an unmatched index. + commit := min(r.prs[to].Match, r.raftLog.committed) + m := pb.Message{ + To: to, + Type: pb.MsgHeartbeat, + Commit: commit, + Context: ctx, + } + + r.send(m) +} + +// bcastAppend sends RPC, with entries to all peers that are not up-to-date +// according to the progress recorded in r.prs. +func (r *raft) bcastAppend() { + for id := range r.prs { + if id == r.id { + continue + } + r.sendAppend(id) + } +} + +// bcastHeartbeat sends RPC, without entries to all the peers. +func (r *raft) bcastHeartbeat() { + lastCtx := r.readOnly.lastPendingRequestCtx() + if len(lastCtx) == 0 { + r.bcastHeartbeatWithCtx(nil) + } else { + r.bcastHeartbeatWithCtx([]byte(lastCtx)) + } +} + +func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { + for id := range r.prs { + if id == r.id { + continue + } + r.sendHeartbeat(id, ctx) + } +} + +// maybeCommit attempts to advance the commit index. Returns true if +// the commit index changed (in which case the caller should call +// r.bcastAppend). +func (r *raft) maybeCommit() bool { + // TODO(bmizerany): optimize.. 
Currently naive + mis := make(uint64Slice, 0, len(r.prs)) + for id := range r.prs { + mis = append(mis, r.prs[id].Match) + } + sort.Sort(sort.Reverse(mis)) + mci := mis[r.quorum()-1] + return r.raftLog.maybeCommit(mci, r.Term) +} + +func (r *raft) reset(term uint64) { + if r.Term != term { + r.Term = term + r.Vote = None + } + r.lead = None + + r.electionElapsed = 0 + r.heartbeatElapsed = 0 + r.resetRandomizedElectionTimeout() + + r.abortLeaderTransfer() + + r.votes = make(map[uint64]bool) + for id := range r.prs { + r.prs[id] = &Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight)} + if id == r.id { + r.prs[id].Match = r.raftLog.lastIndex() + } + } + r.pendingConf = false + r.readOnly = newReadOnly(r.readOnly.option) +} + +func (r *raft) appendEntry(es ...pb.Entry) { + li := r.raftLog.lastIndex() + for i := range es { + es[i].Term = r.Term + es[i].Index = li + 1 + uint64(i) + } + r.raftLog.append(es...) + r.prs[r.id].maybeUpdate(r.raftLog.lastIndex()) + // Regardless of maybeCommit's return, our caller will call bcastAppend. + r.maybeCommit() +} + +// tickElection is run by followers and candidates after r.electionTimeout. +func (r *raft) tickElection() { + r.electionElapsed++ + + if r.promotable() && r.pastElectionTimeout() { + r.electionElapsed = 0 + r.Step(pb.Message{From: r.id, Type: pb.MsgHup}) + } +} + +// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout. +func (r *raft) tickHeartbeat() { + r.heartbeatElapsed++ + r.electionElapsed++ + + if r.electionElapsed >= r.electionTimeout { + r.electionElapsed = 0 + if r.checkQuorum { + r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}) + } + // If current leader cannot transfer leadership in electionTimeout, it becomes leader again. + if r.state == StateLeader && r.leadTransferee != None { + r.abortLeaderTransfer() + } + } + + if r.state != StateLeader { + return + } + + if r.heartbeatElapsed >= r.heartbeatTimeout { + r.heartbeatElapsed = 0 + r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}) + } +} + +func (r *raft) becomeFollower(term uint64, lead uint64) { + r.step = stepFollower + r.reset(term) + r.tick = r.tickElection + r.lead = lead + r.state = StateFollower + r.logger.Infof("%x became follower at term %d", r.id, r.Term) +} + +func (r *raft) becomeCandidate() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateLeader { + panic("invalid transition [leader -> candidate]") + } + r.step = stepCandidate + r.reset(r.Term + 1) + r.tick = r.tickElection + r.Vote = r.id + r.state = StateCandidate + r.logger.Infof("%x became candidate at term %d", r.id, r.Term) +} + +func (r *raft) becomePreCandidate() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateLeader { + panic("invalid transition [leader -> pre-candidate]") + } + // Becoming a pre-candidate changes our step functions and state, + // but doesn't change anything else. In particular it does not increase + // r.Term or change r.Vote. 
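+	// As a result, a pre-candidate that fails to win a pre-vote rejoins the
+	// cluster at its old term without having disturbed anyone else's term.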
+ r.step = stepCandidate + r.tick = r.tickElection + r.state = StatePreCandidate + r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term) +} + +func (r *raft) becomeLeader() { + // TODO(xiangli) remove the panic when the raft implementation is stable + if r.state == StateFollower { + panic("invalid transition [follower -> leader]") + } + r.step = stepLeader + r.reset(r.Term) + r.tick = r.tickHeartbeat + r.lead = r.id + r.state = StateLeader + ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit) + if err != nil { + r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err) + } + + nconf := numOfPendingConf(ents) + if nconf > 1 { + panic("unexpected multiple uncommitted config entry") + } + if nconf == 1 { + r.pendingConf = true + } + + r.appendEntry(pb.Entry{Data: nil}) + r.logger.Infof("%x became leader at term %d", r.id, r.Term) +} + +func (r *raft) campaign(t CampaignType) { + var term uint64 + var voteMsg pb.MessageType + if t == campaignPreElection { + r.becomePreCandidate() + voteMsg = pb.MsgPreVote + // PreVote RPCs are sent for the next term before we've incremented r.Term. + term = r.Term + 1 + } else { + r.becomeCandidate() + voteMsg = pb.MsgVote + term = r.Term + } + if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) { + // We won the election after voting for ourselves (which must mean that + // this is a single-node cluster). Advance to the next state. + if t == campaignPreElection { + r.campaign(campaignElection) + } else { + r.becomeLeader() + } + return + } + for id := range r.prs { + if id == r.id { + continue + } + r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term) + + var ctx []byte + if t == campaignTransfer { + ctx = []byte(t) + } + r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx}) + } +} + +func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) { + if v { + r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term) + } else { + r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term) + } + if _, ok := r.votes[id]; !ok { + r.votes[id] = v + } + for _, vv := range r.votes { + if vv { + granted++ + } + } + return granted +} + +func (r *raft) Step(m pb.Message) error { + // Handle the message term, which may result in our stepping down to a follower. + switch { + case m.Term == 0: + // local message + case m.Term > r.Term: + lead := m.From + if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote { + force := bytes.Equal(m.Context, []byte(campaignTransfer)) + inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout + if !force && inLease { + // If a server receives a RequestVote request within the minimum election timeout + // of hearing from a current leader, it does not update its term or grant its vote + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed) + return nil + } + lead = None + } + switch { + case m.Type == pb.MsgPreVote: + // Never change our term in response to a PreVote + case m.Type == pb.MsgPreVoteResp && !m.Reject: + // We send pre-vote requests with a term in our future. 
If the + // pre-vote is granted, we will increment our term when we get a + // quorum. If it is not, the term comes from the node that + // rejected our vote so we should become a follower at the new + // term. + default: + r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]", + r.id, r.Term, m.Type, m.From, m.Term) + r.becomeFollower(m.Term, lead) + } + + case m.Term < r.Term: + if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) { + // We have received messages from a leader at a lower term. It is possible + // that these messages were simply delayed in the network, but this could + // also mean that this node has advanced its term number during a network + // partition, and it is now unable to either win an election or to rejoin + // the majority on the old term. If checkQuorum is false, this will be + // handled by incrementing term numbers in response to MsgVote with a + // higher term, but if checkQuorum is true we may not advance the term on + // MsgVote and must generate other messages to advance the term. The net + // result of these two features is to minimize the disruption caused by + // nodes that have been removed from the cluster's configuration: a + // removed node will send MsgVotes (or MsgPreVotes) which will be ignored, + // but it will not receive MsgApp or MsgHeartbeat, so it will not create + // disruptive term increases + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp}) + } else { + // ignore other cases + r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]", + r.id, r.Term, m.Type, m.From, m.Term) + } + return nil + } + + switch m.Type { + case pb.MsgHup: + if r.state != StateLeader { + ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit) + if err != nil { + r.logger.Panicf("unexpected error getting unapplied entries (%v)", err) + } + if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied { + r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n) + return nil + } + + r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term) + if r.preVote { + r.campaign(campaignPreElection) + } else { + r.campaign(campaignElection) + } + } else { + r.logger.Debugf("%x ignoring MsgHup because already leader", r.id) + } + + case pb.MsgVote, pb.MsgPreVote: + // The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should + // always equal r.Term. + if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) { + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type)}) + if m.Type == pb.MsgVote { + // Only record real votes. + r.electionElapsed = 0 + r.Vote = m.From + } + } else { + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type), Reject: true}) + } + + default: + r.step(r, m) + } + return nil +} + +type stepFunc func(r *raft, m pb.Message) + +func stepLeader(r *raft, m pb.Message) { + // These message types do not require any progress for m.From. 
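+	// MsgBeat and MsgCheckQuorum are generated locally, while MsgProp and
+	// MsgReadIndex may be forwarded from followers, so all of them are
+	// handled before the per-peer Progress lookup below.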
+	switch m.Type {
+	case pb.MsgBeat:
+		r.bcastHeartbeat()
+		return
+	case pb.MsgCheckQuorum:
+		if !r.checkQuorumActive() {
+			r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
+			r.becomeFollower(r.Term, None)
+		}
+		return
+	case pb.MsgProp:
+		if len(m.Entries) == 0 {
+			r.logger.Panicf("%x stepped empty MsgProp", r.id)
+		}
+		if _, ok := r.prs[r.id]; !ok {
+			// If we are not currently a member of the cluster (i.e. this node
+			// was removed from the configuration while serving as leader),
+			// drop any new proposals.
+			return
+		}
+		if r.leadTransferee != None {
+			r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
+			return
+		}
+
+		for i, e := range m.Entries {
+			if e.Type == pb.EntryConfChange {
+				if r.pendingConf {
+					r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String())
+					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
+				}
+				r.pendingConf = true
+			}
+		}
+		r.appendEntry(m.Entries...)
+		r.bcastAppend()
+		return
+	case pb.MsgReadIndex:
+		if r.quorum() > 1 {
+			if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
+				// Reject read-only requests when this leader has not committed any log entry at its term.
+				return
+			}
+
+			// thinking: use an internally defined context instead of the user-given context.
+			// We can express this in terms of the term and index instead of a user-supplied value.
+			// This would allow multiple reads to piggyback on the same message.
+			switch r.readOnly.option {
+			case ReadOnlySafe:
+				r.readOnly.addRequest(r.raftLog.committed, m)
+				r.bcastHeartbeatWithCtx(m.Entries[0].Data)
+			case ReadOnlyLeaseBased:
+				var ri uint64
+				if r.checkQuorum {
+					ri = r.raftLog.committed
+				}
+				if m.From == None || m.From == r.id { // from local member
+					r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+				} else {
+					r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
+				}
+			}
+		} else {
+			r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+		}
+
+		return
+	}
+
+	// All other message types require a progress for m.From (pr).
+	pr, prOk := r.prs[m.From]
+	if !prOk {
+		r.logger.Debugf("%x no progress available for %x", r.id, m.From)
+		return
+	}
+	switch m.Type {
+	case pb.MsgAppResp:
+		pr.RecentActive = true
+
+		if m.Reject {
+			r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
+				r.id, m.RejectHint, m.From, m.Index)
+			if pr.maybeDecrTo(m.Index, m.RejectHint) {
+				r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
+				if pr.State == ProgressStateReplicate {
+					pr.becomeProbe()
+				}
+				r.sendAppend(m.From)
+			}
+		} else {
+			oldPaused := pr.IsPaused()
+			if pr.maybeUpdate(m.Index) {
+				switch {
+				case pr.State == ProgressStateProbe:
+					pr.becomeReplicate()
+				case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
+					r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+					pr.becomeProbe()
+				case pr.State == ProgressStateReplicate:
+					pr.ins.freeTo(m.Index)
+				}
+
+				if r.maybeCommit() {
+					r.bcastAppend()
+				} else if oldPaused {
+					// update() reset the wait state on this node. If we had delayed sending
+					// an update before, send it now.
+					r.sendAppend(m.From)
+				}
+				// Transfer leadership is in progress.
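+				// Once the transferee's log has caught up with the leader's,
+				// it is safe to ask it to campaign right away.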
+				if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
+					r.logger.Infof("%x sent MsgTimeoutNow to %x after receiving MsgAppResp", r.id, m.From)
+					r.sendTimeoutNow(m.From)
+				}
+			}
+		}
+	case pb.MsgHeartbeatResp:
+		pr.RecentActive = true
+		pr.resume()
+
+		// Free one slot in a full inflights window to allow progress.
+		if pr.State == ProgressStateReplicate && pr.ins.full() {
+			pr.ins.freeFirstOne()
+		}
+		if pr.Match < r.raftLog.lastIndex() {
+			r.sendAppend(m.From)
+		}
+
+		if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
+			return
+		}
+
+		ackCount := r.readOnly.recvAck(m)
+		if ackCount < r.quorum() {
+			return
+		}
+
+		rss := r.readOnly.advance(m)
+		for _, rs := range rss {
+			req := rs.req
+			if req.From == None || req.From == r.id { // from local member
+				r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
+			} else {
+				r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
+			}
+		}
+	case pb.MsgSnapStatus:
+		if pr.State != ProgressStateSnapshot {
+			return
+		}
+		if !m.Reject {
+			pr.becomeProbe()
+			r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		} else {
+			pr.snapshotFailure()
+			pr.becomeProbe()
+			r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		}
+		// If the snapshot finished, wait for the MsgAppResp from the remote node
+		// before sending out the next MsgApp.
+		// If the snapshot failed, wait for a heartbeat interval before the next try.
+		pr.pause()
+	case pb.MsgUnreachable:
+		// During optimistic replication, if the remote becomes unreachable,
+		// there is a high probability that a MsgApp is lost.
+		if pr.State == ProgressStateReplicate {
+			pr.becomeProbe()
+		}
+		r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
+	case pb.MsgTransferLeader:
+		leadTransferee := m.From
+		lastLeadTransferee := r.leadTransferee
+		if lastLeadTransferee != None {
+			if lastLeadTransferee == leadTransferee {
+				r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignoring request to the same node %x",
+					r.id, r.Term, leadTransferee, leadTransferee)
+				return
+			}
+			r.abortLeaderTransfer()
+			r.logger.Infof("%x [term %d] aborted previous leadership transfer to %x", r.id, r.Term, lastLeadTransferee)
+		}
+		if leadTransferee == r.id {
+			r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
+			return
+		}
+		// Transfer leadership to a third party.
+		r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
+		// Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
+		r.electionElapsed = 0
+		r.leadTransferee = leadTransferee
+		if pr.Match == r.raftLog.lastIndex() {
+			r.sendTimeoutNow(leadTransferee)
+			r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
+		} else {
+			r.sendAppend(leadTransferee)
+		}
+	}
+}
+
+// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
+// whether they respond to MsgVoteResp or MsgPreVoteResp.
+func stepCandidate(r *raft, m pb.Message) {
+	// Only handle vote responses corresponding to our candidacy (while in
+	// StateCandidate, we may get stale MsgPreVoteResp messages in this term from
+	// our pre-candidate state).
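+	// A stale response of the other type matches no case in the switch below
+	// and is ignored.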
+ var myVoteRespType pb.MessageType + if r.state == StatePreCandidate { + myVoteRespType = pb.MsgPreVoteResp + } else { + myVoteRespType = pb.MsgVoteResp + } + switch m.Type { + case pb.MsgProp: + r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) + return + case pb.MsgApp: + r.becomeFollower(r.Term, m.From) + r.handleAppendEntries(m) + case pb.MsgHeartbeat: + r.becomeFollower(r.Term, m.From) + r.handleHeartbeat(m) + case pb.MsgSnap: + r.becomeFollower(m.Term, m.From) + r.handleSnapshot(m) + case myVoteRespType: + gr := r.poll(m.From, m.Type, !m.Reject) + r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr) + switch r.quorum() { + case gr: + if r.state == StatePreCandidate { + r.campaign(campaignElection) + } else { + r.becomeLeader() + r.bcastAppend() + } + case len(r.votes) - gr: + r.becomeFollower(r.Term, None) + } + case pb.MsgTimeoutNow: + r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From) + } +} + +func stepFollower(r *raft, m pb.Message) { + switch m.Type { + case pb.MsgProp: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) + return + } + m.To = r.lead + r.send(m) + case pb.MsgApp: + r.electionElapsed = 0 + r.lead = m.From + r.handleAppendEntries(m) + case pb.MsgHeartbeat: + r.electionElapsed = 0 + r.lead = m.From + r.handleHeartbeat(m) + case pb.MsgSnap: + r.electionElapsed = 0 + r.lead = m.From + r.handleSnapshot(m) + case pb.MsgTransferLeader: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term) + return + } + m.To = r.lead + r.send(m) + case pb.MsgTimeoutNow: + if r.promotable() { + r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From) + // Leadership transfers never use pre-vote even if r.preVote is true; we + // know we are not recovering from a partition so there is no need for the + // extra round trip. 
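+			// campaignTransfer is also carried in the vote's Context so that
+			// recipients bypass the leader-lease check in Step.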
+			r.campaign(campaignTransfer)
+		} else {
+			r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
+		}
+	case pb.MsgReadIndex:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
+			return
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgReadIndexResp:
+		if len(m.Entries) != 1 {
+			r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
+			return
+		}
+		r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
+	}
+}
+
+func (r *raft) handleAppendEntries(m pb.Message) {
+	if m.Index < r.raftLog.committed {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+		return
+	}
+
+	if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
+	} else {
+		r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
+			r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
+	}
+}
+
+func (r *raft) handleHeartbeat(m pb.Message) {
+	r.raftLog.commitTo(m.Commit)
+	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
+}
+
+func (r *raft) handleSnapshot(m pb.Message) {
+	sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
+	if r.restore(m.Snapshot) {
+		r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
+	} else {
+		r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+	}
+}
+
+// restore recovers the state machine from a snapshot. It restores the log and the
+// configuration of the state machine.
+func (r *raft) restore(s pb.Snapshot) bool {
+	if s.Metadata.Index <= r.raftLog.committed {
+		return false
+	}
+	if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
+		r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+		r.raftLog.commitTo(s.Metadata.Index)
+		return false
+	}
+
+	r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
+		r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+
+	r.raftLog.restore(s)
+	r.prs = make(map[uint64]*Progress)
+	for _, n := range s.Metadata.ConfState.Nodes {
+		match, next := uint64(0), r.raftLog.lastIndex()+1
+		if n == r.id {
+			match = next - 1
+		}
+		r.setProgress(n, match, next)
+		r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.prs[n])
+	}
+	return true
+}
+
+// promotable indicates whether the state machine can be promoted to leader,
+// which is true when its own id is in the progress list.
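+// A node that was removed via removeNode, or that was restored from a
+// snapshot whose ConfState does not include it, has no entry in r.prs and
+// therefore never campaigns.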
+func (r *raft) promotable() bool {
+	_, ok := r.prs[r.id]
+	return ok
+}
+
+func (r *raft) addNode(id uint64) {
+	r.pendingConf = false
+	if _, ok := r.prs[id]; ok {
+		// Ignore any redundant addNode calls (which can happen because the
+		// initial bootstrapping entries are applied twice).
+		return
+	}
+
+	r.setProgress(id, 0, r.raftLog.lastIndex()+1)
+	// When a node is first added, we should mark it as recently active.
+	// Otherwise, CheckQuorum may cause us to step down if it is invoked
+	// before the added node has a chance to communicate with us.
+	r.prs[id].RecentActive = true
+}
+
+func (r *raft) removeNode(id uint64) {
+	r.delProgress(id)
+	r.pendingConf = false
+
+	// Do not try to commit or abort a transfer if there are no nodes in the cluster.
+	if len(r.prs) == 0 {
+		return
+	}
+
+	// The quorum size is now smaller, so see if any pending entries can
+	// be committed.
+	if r.maybeCommit() {
+		r.bcastAppend()
+	}
+	// If the removed node is the leadTransferee, then abort the leadership transfer.
+	if r.state == StateLeader && r.leadTransferee == id {
+		r.abortLeaderTransfer()
+	}
+}
+
+func (r *raft) resetPendingConf() { r.pendingConf = false }
+
+func (r *raft) setProgress(id, match, next uint64) {
+	r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
+}
+
+func (r *raft) delProgress(id uint64) {
+	delete(r.prs, id)
+}
+
+func (r *raft) loadState(state pb.HardState) {
+	if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
+		r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
+	}
+	r.raftLog.committed = state.Commit
+	r.Term = state.Term
+	r.Vote = state.Vote
+}
+
+// pastElectionTimeout returns true iff r.electionElapsed is greater
+// than or equal to the randomized election timeout in
+// [electiontimeout, 2 * electiontimeout - 1].
+func (r *raft) pastElectionTimeout() bool {
+	return r.electionElapsed >= r.randomizedElectionTimeout
+}
+
+func (r *raft) resetRandomizedElectionTimeout() {
+	r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
+}
+
+// checkQuorumActive returns true if the quorum is active from
+// the view of the local raft state machine. Otherwise, it returns
+// false.
+// checkQuorumActive also resets all RecentActive to false.
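+// RecentActive is set again whenever the leader receives a MsgAppResp or
+// MsgHeartbeatResp from a peer, so a peer counts as active if it has
+// responded since the previous MsgCheckQuorum tick.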
+func (r *raft) checkQuorumActive() bool { + var act int + + for id := range r.prs { + if id == r.id { // self is always active + act++ + continue + } + + if r.prs[id].RecentActive { + act++ + } + + r.prs[id].RecentActive = false + } + + return act >= r.quorum() +} + +func (r *raft) sendTimeoutNow(to uint64) { + r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow}) +} + +func (r *raft) abortLeaderTransfer() { + r.leadTransferee = None +} + +func numOfPendingConf(ents []pb.Entry) int { + n := 0 + for i := range ents { + if ents[i].Type == pb.EntryConfChange { + n++ + } + } + return n +} diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/BUILD b/vendor/github.com/coreos/etcd/raft/raftpb/BUILD new file mode 100644 index 000000000000..26368bcf57ad --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/raftpb/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["raft.pb.go"], + importpath = "github.com/coreos/etcd/raft/raftpb", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go new file mode 100644 index 000000000000..4c6e79d58a0c --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -0,0 +1,1942 @@ +// Code generated by protoc-gen-gogo. +// source: raft.proto +// DO NOT EDIT! + +/* + Package raftpb is a generated protocol buffer package. + + It is generated from these files: + raft.proto + + It has these top-level messages: + Entry + SnapshotMetadata + Snapshot + Message + HardState + ConfState + ConfChange +*/ +package raftpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EntryType int32 + +const ( + EntryNormal EntryType = 0 + EntryConfChange EntryType = 1 +) + +var EntryType_name = map[int32]string{ + 0: "EntryNormal", + 1: "EntryConfChange", +} +var EntryType_value = map[string]int32{ + "EntryNormal": 0, + "EntryConfChange": 1, +} + +func (x EntryType) Enum() *EntryType { + p := new(EntryType) + *p = x + return p +} +func (x EntryType) String() string { + return proto.EnumName(EntryType_name, int32(x)) +} +func (x *EntryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") + if err != nil { + return err + } + *x = EntryType(value) + return nil +} +func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type MessageType int32 + +const ( + MsgHup MessageType = 0 + MsgBeat MessageType = 1 + MsgProp MessageType = 2 + MsgApp MessageType = 3 + MsgAppResp MessageType = 4 + MsgVote MessageType = 5 + MsgVoteResp MessageType = 6 + MsgSnap MessageType = 7 + MsgHeartbeat MessageType = 8 + MsgHeartbeatResp MessageType = 9 + MsgUnreachable MessageType = 10 + MsgSnapStatus MessageType = 11 + MsgCheckQuorum MessageType = 12 + MsgTransferLeader MessageType = 13 + MsgTimeoutNow MessageType = 14 + MsgReadIndex MessageType = 15 + MsgReadIndexResp MessageType = 16 + MsgPreVote MessageType = 17 + MsgPreVoteResp MessageType = 18 +) + +var MessageType_name = map[int32]string{ + 0: "MsgHup", + 1: "MsgBeat", + 2: "MsgProp", + 3: "MsgApp", + 4: "MsgAppResp", + 5: "MsgVote", + 6: "MsgVoteResp", + 7: "MsgSnap", + 8: "MsgHeartbeat", + 9: "MsgHeartbeatResp", + 10: "MsgUnreachable", + 11: "MsgSnapStatus", + 12: "MsgCheckQuorum", + 13: "MsgTransferLeader", + 14: "MsgTimeoutNow", + 15: "MsgReadIndex", + 16: "MsgReadIndexResp", + 17: "MsgPreVote", + 18: "MsgPreVoteResp", +} +var MessageType_value = map[string]int32{ + "MsgHup": 0, + "MsgBeat": 1, + "MsgProp": 2, + "MsgApp": 3, + "MsgAppResp": 4, + "MsgVote": 5, + "MsgVoteResp": 6, + "MsgSnap": 7, + "MsgHeartbeat": 8, + "MsgHeartbeatResp": 9, + "MsgUnreachable": 10, + "MsgSnapStatus": 11, + "MsgCheckQuorum": 12, + "MsgTransferLeader": 13, + "MsgTimeoutNow": 14, + "MsgReadIndex": 15, + "MsgReadIndexResp": 16, + "MsgPreVote": 17, + "MsgPreVoteResp": 18, +} + +func (x MessageType) Enum() *MessageType { + p := new(MessageType) + *p = x + return p +} +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} +func (x *MessageType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") + if err != nil { + return err + } + *x = MessageType(value) + return nil +} +func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type ConfChangeType int32 + +const ( + ConfChangeAddNode ConfChangeType = 0 + ConfChangeRemoveNode ConfChangeType = 1 + ConfChangeUpdateNode ConfChangeType = 2 +) + +var ConfChangeType_name = map[int32]string{ + 0: "ConfChangeAddNode", + 1: "ConfChangeRemoveNode", + 2: "ConfChangeUpdateNode", +} +var ConfChangeType_value = map[string]int32{ + "ConfChangeAddNode": 0, + "ConfChangeRemoveNode": 1, + "ConfChangeUpdateNode": 2, +} + +func (x ConfChangeType) Enum() *ConfChangeType { + p := new(ConfChangeType) + *p = x + return p +} +func (x ConfChangeType) String() string { + return proto.EnumName(ConfChangeType_name, int32(x)) +} +func (x *ConfChangeType) UnmarshalJSON(data []byte) error { + value, err := 
proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") + if err != nil { + return err + } + *x = ConfChangeType(value) + return nil +} +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Entry struct { + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type SnapshotMetadata struct { + ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` + Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type Snapshot struct { + Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Message struct { + Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` + To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` + From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` + Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` + LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` + Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` + Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type HardState struct { + Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` + Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` + Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type 
ConfState struct { + Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } + +type ConfChange struct { + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"` + NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"` + Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +func init() { + proto.RegisterType((*Entry)(nil), "raftpb.Entry") + proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") + proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") + proto.RegisterType((*Message)(nil), "raftpb.Message") + proto.RegisterType((*HardState)(nil), "raftpb.HardState") + proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") + proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") + proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) +} +func (m *Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Entry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if m.Data != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size())) + n1, err := m.ConfState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Data != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, 
uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.To)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.From)) + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x28 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) + dAtA[i] = 0x30 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x40 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size())) + n3, err := m.Snapshot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x50 + i++ + if m.Reject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) + if m.Context != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *HardState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HardState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, num := range m.Nodes { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x18 + i++ + i = 
encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.Context != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Raft(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Raft(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Entry) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Index)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotMetadata) Size() (n int) { + var l int + _ = l + l = m.ConfState.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 1 + sovRaft(uint64(m.Index)) + n += 1 + sovRaft(uint64(m.Term)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Metadata.Size() + n += 1 + l + sovRaft(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.To)) + n += 1 + sovRaft(uint64(m.From)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.LogTerm)) + n += 1 + sovRaft(uint64(m.Index)) + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + n += 1 + sovRaft(uint64(m.Commit)) + l = m.Snapshot.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 2 + n += 1 + sovRaft(uint64(m.RejectHint)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HardState) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Vote)) + n += 1 + sovRaft(uint64(m.Commit)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfState) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + n += 1 + sovRaft(uint64(e)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChange) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.ID)) + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) 
(n int) { + return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Entry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (EntryType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + m.To = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.To |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) + } + m.LogTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LogTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, Entry{}) + if err := 
m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reject = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) + } + m.RejectHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectHint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HardState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
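+			// Editor's note: HardState is the small durable triple — the current
+			// term, the candidate voted for in that term, and the highest
+			// committed index — that a raft node must persist to stable storage
+			// before sending any further messages that depend on it.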
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
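+			// Editor's note: Nodes is a repeated uint64, and the case above
+			// accepts both of protobuf's encodings for it — unpacked (one tag
+			// byte per element, wire type 0) and packed (a single
+			// length-delimited blob, wire type 2). For nodes = [1, 2, 3]:
+			//
+			//	unpacked: 0x08 0x01  0x08 0x02  0x08 0x03
+			//	packed:   0x0a 0x03  0x01 0x02 0x03
+			//
+			// Both branches append into m.Nodes identically.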
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
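+			// Editor's note: unknown fields are preserved rather than dropped —
+			// skipRaft (below) measures them and the raw bytes are stashed in
+			// XXX_unrecognized, so fields added by a newer peer survive a
+			// decode/re-encode round trip through an older node.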
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 790 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x16, 0x29, 0xea, 0x6f, 0x28, 0xcb, 0xab, 0xb5, 0x5a, 0x2c, 0x0c, 0x43, 0x55, 0x85, 0x1e, + 0x04, 0x17, 0x76, 0x5b, 0x1d, 0x7a, 0xe8, 0xcd, 0x96, 0x0a, 0x58, 0x40, 0x65, 0xb8, 0xb2, 0xdc, + 0x43, 0x83, 0x20, 0x58, 0x8b, 0x2b, 0x4a, 0x89, 0xc9, 0x25, 0x96, 0x2b, 0xc7, 0xbe, 0x04, 0x79, + 0x80, 0x3c, 0x40, 0x2e, 0x79, 0x1f, 0x1f, 0x0d, 0xe4, 0x1e, 0xc4, 0xce, 0x8b, 0x04, 0xbb, 0x5c, + 0x4a, 0x94, 0x74, 0xdb, 0xf9, 0xbe, 0xe1, 0xcc, 0x37, 0xdf, 0xce, 0x12, 0x40, 0xd0, 0xa9, 0x3c, + 0x8e, 0x04, 0x97, 0x1c, 0x17, 0xd5, 0x39, 0xba, 0xde, 0x6f, 0xf8, 0xdc, 0xe7, 0x1a, 0xfa, 0x4d, + 0x9d, 0x12, 0xb6, 0xfd, 0x0e, 0x0a, 0x7f, 0x87, 0x52, 0xdc, 0xe3, 0x5f, 0xc1, 0x19, 0xdf, 0x47, + 0x8c, 0x58, 0x2d, 0xab, 0x53, 0xeb, 0xd6, 0x8f, 0x93, 0xaf, 0x8e, 0x35, 0xa9, 0x88, 0x53, 0xe7, + 0xe1, 0xcb, 0x4f, 0xb9, 0x91, 0x4e, 0xc2, 0x04, 0x9c, 0x31, 0x13, 0x01, 0xb1, 0x5b, 0x56, 0xc7, + 0x59, 0x32, 0x4c, 0x04, 0x78, 0x1f, 0x0a, 0x83, 0xd0, 0x63, 0x77, 0x24, 0x9f, 0xa1, 0x12, 0x08, + 0x63, 0x70, 0xfa, 0x54, 0x52, 0xe2, 0xb4, 0xac, 0x4e, 0x75, 0xa4, 0xcf, 0xed, 0xf7, 0x16, 0xa0, + 0xcb, 0x90, 0x46, 0xf1, 0x8c, 0xcb, 0x21, 0x93, 0xd4, 0xa3, 0x92, 0xe2, 0x3f, 0x01, 0x26, 0x3c, + 0x9c, 0xbe, 0x8a, 0x25, 0x95, 0x89, 0x22, 0x77, 0xa5, 0xa8, 0xc7, 0xc3, 0xe9, 0xa5, 0x22, 0x4c, + 0xf1, 0xca, 0x24, 
0x05, 0x54, 0xf3, 0xb9, 0x6e, 0x9e, 0xd5, 0x95, 0x40, 0x4a, 0xb2, 0x54, 0x92, + 0xb3, 0xba, 0x34, 0xd2, 0xfe, 0x1f, 0xca, 0xa9, 0x02, 0x25, 0x51, 0x29, 0xd0, 0x3d, 0xab, 0x23, + 0x7d, 0xc6, 0x7f, 0x41, 0x39, 0x30, 0xca, 0x74, 0x61, 0xb7, 0x4b, 0x52, 0x2d, 0x9b, 0xca, 0x4d, + 0xdd, 0x65, 0x7e, 0xfb, 0x53, 0x1e, 0x4a, 0x43, 0x16, 0xc7, 0xd4, 0x67, 0xf8, 0x08, 0x1c, 0xb9, + 0x72, 0x78, 0x2f, 0xad, 0x61, 0xe8, 0xac, 0xc7, 0x2a, 0x0d, 0x37, 0xc0, 0x96, 0x7c, 0x6d, 0x12, + 0x5b, 0x72, 0x35, 0xc6, 0x54, 0xf0, 0x8d, 0x31, 0x14, 0xb2, 0x1c, 0xd0, 0xd9, 0x1c, 0x10, 0x37, + 0xa1, 0x74, 0xc3, 0x7d, 0x7d, 0x61, 0x85, 0x0c, 0x99, 0x82, 0x2b, 0xdb, 0x8a, 0xdb, 0xb6, 0x1d, + 0x41, 0x89, 0x85, 0x52, 0xcc, 0x59, 0x4c, 0x4a, 0xad, 0x7c, 0xc7, 0xed, 0xee, 0xac, 0x6d, 0x46, + 0x5a, 0xca, 0xe4, 0xe0, 0x03, 0x28, 0x4e, 0x78, 0x10, 0xcc, 0x25, 0x29, 0x67, 0x6a, 0x19, 0x0c, + 0x77, 0xa1, 0x1c, 0x1b, 0xc7, 0x48, 0x45, 0x3b, 0x89, 0x36, 0x9d, 0x4c, 0x1d, 0x4c, 0xf3, 0x54, + 0x45, 0xc1, 0x5e, 0xb3, 0x89, 0x24, 0xd0, 0xb2, 0x3a, 0xe5, 0xb4, 0x62, 0x82, 0xe1, 0x5f, 0x00, + 0x92, 0xd3, 0xd9, 0x3c, 0x94, 0xc4, 0xcd, 0xf4, 0xcc, 0xe0, 0x98, 0x40, 0x69, 0xc2, 0x43, 0xc9, + 0xee, 0x24, 0xa9, 0xea, 0x8b, 0x4d, 0xc3, 0xf6, 0x4b, 0xa8, 0x9c, 0x51, 0xe1, 0x25, 0xeb, 0x93, + 0x3a, 0x68, 0x6d, 0x39, 0x48, 0xc0, 0xb9, 0xe5, 0x92, 0xad, 0xef, 0xbb, 0x42, 0x32, 0x03, 0xe7, + 0xb7, 0x07, 0x6e, 0xff, 0x0c, 0x95, 0xe5, 0xba, 0xe2, 0x06, 0x14, 0x42, 0xee, 0xb1, 0x98, 0x58, + 0xad, 0x7c, 0xc7, 0x19, 0x25, 0x41, 0xfb, 0x83, 0x05, 0xa0, 0x72, 0x7a, 0x33, 0x1a, 0xfa, 0xfa, + 0xd6, 0x07, 0xfd, 0x35, 0x05, 0xf6, 0xa0, 0x8f, 0x7f, 0x37, 0x8f, 0xd3, 0xd6, 0xab, 0xf3, 0x63, + 0xf6, 0x29, 0x24, 0xdf, 0x6d, 0xbd, 0xd0, 0x03, 0x28, 0x9e, 0x73, 0x8f, 0x0d, 0xfa, 0xeb, 0xba, + 0x12, 0x4c, 0x19, 0xd2, 0x33, 0x86, 0x24, 0x8f, 0x31, 0x0d, 0x0f, 0xff, 0x80, 0xca, 0xf2, 0xc9, + 0xe3, 0x5d, 0x70, 0x75, 0x70, 0xce, 0x45, 0x40, 0x6f, 0x50, 0x0e, 0xef, 0xc1, 0xae, 0x06, 0x56, + 0x8d, 0x91, 0x75, 0xf8, 0xd9, 0x06, 0x37, 0xb3, 0xc4, 0x18, 0xa0, 0x38, 0x8c, 0xfd, 0xb3, 0x45, + 0x84, 0x72, 0xd8, 0x85, 0xd2, 0x30, 0xf6, 0x4f, 0x19, 0x95, 0xc8, 0x32, 0xc1, 0x85, 0xe0, 0x11, + 0xb2, 0x4d, 0xd6, 0x49, 0x14, 0xa1, 0x3c, 0xae, 0x01, 0x24, 0xe7, 0x11, 0x8b, 0x23, 0xe4, 0x98, + 0xc4, 0xff, 0xb8, 0x64, 0xa8, 0xa0, 0x44, 0x98, 0x40, 0xb3, 0x45, 0xc3, 0xaa, 0x85, 0x41, 0x25, + 0x8c, 0xa0, 0xaa, 0x9a, 0x31, 0x2a, 0xe4, 0xb5, 0xea, 0x52, 0xc6, 0x0d, 0x40, 0x59, 0x44, 0x7f, + 0x54, 0xc1, 0x18, 0x6a, 0xc3, 0xd8, 0xbf, 0x0a, 0x05, 0xa3, 0x93, 0x19, 0xbd, 0xbe, 0x61, 0x08, + 0x70, 0x1d, 0x76, 0x4c, 0x21, 0x75, 0x41, 0x8b, 0x18, 0xb9, 0x26, 0xad, 0x37, 0x63, 0x93, 0x37, + 0xff, 0x2e, 0xb8, 0x58, 0x04, 0xa8, 0x8a, 0x7f, 0x80, 0xfa, 0x30, 0xf6, 0xc7, 0x82, 0x86, 0xf1, + 0x94, 0x89, 0x7f, 0x18, 0xf5, 0x98, 0x40, 0x3b, 0xe6, 0xeb, 0xf1, 0x3c, 0x60, 0x7c, 0x21, 0xcf, + 0xf9, 0x5b, 0x54, 0x33, 0x62, 0x46, 0x8c, 0x7a, 0xfa, 0x87, 0x87, 0x76, 0x8d, 0x98, 0x25, 0xa2, + 0xc5, 0x20, 0x33, 0xef, 0x85, 0x60, 0x7a, 0xc4, 0xba, 0xe9, 0x6a, 0x62, 0x9d, 0x83, 0x0f, 0x5f, + 0x40, 0x6d, 0xfd, 0x7a, 0x95, 0x8e, 0x15, 0x72, 0xe2, 0x79, 0xea, 0x2e, 0x51, 0x0e, 0x13, 0x68, + 0xac, 0xe0, 0x11, 0x0b, 0xf8, 0x2d, 0xd3, 0x8c, 0xb5, 0xce, 0x5c, 0x45, 0x1e, 0x95, 0x09, 0x63, + 0x9f, 0x92, 0x87, 0xa7, 0x66, 0xee, 0xf1, 0xa9, 0x99, 0x7b, 0x78, 0x6e, 0x5a, 0x8f, 0xcf, 0x4d, + 0xeb, 0xeb, 0x73, 0xd3, 0xfa, 0xf8, 0xad, 0x99, 0xfb, 0x1e, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x30, + 0x01, 0x41, 0x3a, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto 
b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto new file mode 100644 index 000000000000..806a43634fdc --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto @@ -0,0 +1,93 @@ +syntax = "proto2"; +package raftpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +enum EntryType { + EntryNormal = 0; + EntryConfChange = 1; +} + +message Entry { + optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional EntryType Type = 1 [(gogoproto.nullable) = false]; + optional bytes Data = 4; +} + +message SnapshotMetadata { + optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; + optional uint64 index = 2 [(gogoproto.nullable) = false]; + optional uint64 term = 3 [(gogoproto.nullable) = false]; +} + +message Snapshot { + optional bytes data = 1; + optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; +} + +enum MessageType { + MsgHup = 0; + MsgBeat = 1; + MsgProp = 2; + MsgApp = 3; + MsgAppResp = 4; + MsgVote = 5; + MsgVoteResp = 6; + MsgSnap = 7; + MsgHeartbeat = 8; + MsgHeartbeatResp = 9; + MsgUnreachable = 10; + MsgSnapStatus = 11; + MsgCheckQuorum = 12; + MsgTransferLeader = 13; + MsgTimeoutNow = 14; + MsgReadIndex = 15; + MsgReadIndexResp = 16; + MsgPreVote = 17; + MsgPreVoteResp = 18; +} + +message Message { + optional MessageType type = 1 [(gogoproto.nullable) = false]; + optional uint64 to = 2 [(gogoproto.nullable) = false]; + optional uint64 from = 3 [(gogoproto.nullable) = false]; + optional uint64 term = 4 [(gogoproto.nullable) = false]; + optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; + optional uint64 index = 6 [(gogoproto.nullable) = false]; + repeated Entry entries = 7 [(gogoproto.nullable) = false]; + optional uint64 commit = 8 [(gogoproto.nullable) = false]; + optional Snapshot snapshot = 9 [(gogoproto.nullable) = false]; + optional bool reject = 10 [(gogoproto.nullable) = false]; + optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; + optional bytes context = 12; +} + +message HardState { + optional uint64 term = 1 [(gogoproto.nullable) = false]; + optional uint64 vote = 2 [(gogoproto.nullable) = false]; + optional uint64 commit = 3 [(gogoproto.nullable) = false]; +} + +message ConfState { + repeated uint64 nodes = 1; +} + +enum ConfChangeType { + ConfChangeAddNode = 0; + ConfChangeRemoveNode = 1; + ConfChangeUpdateNode = 2; +} + +message ConfChange { + optional uint64 ID = 1 [(gogoproto.nullable) = false]; + optional ConfChangeType Type = 2 [(gogoproto.nullable) = false]; + optional uint64 NodeID = 3 [(gogoproto.nullable) = false]; + optional bytes Context = 4; +} diff --git a/vendor/github.com/coreos/etcd/raft/rawnode.go b/vendor/github.com/coreos/etcd/raft/rawnode.go new file mode 100644 index 000000000000..b950d5169a54 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/rawnode.go @@ -0,0 +1,264 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +// ErrStepLocalMsg is returned when try to step a local raft message +var ErrStepLocalMsg = errors.New("raft: cannot step raft local message") + +// ErrStepPeerNotFound is returned when try to step a response message +// but there is no peer found in raft.prs for that node. +var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found") + +// RawNode is a thread-unsafe Node. +// The methods of this struct correspond to the methods of Node and are described +// more fully there. +type RawNode struct { + raft *raft + prevSoftSt *SoftState + prevHardSt pb.HardState +} + +func (rn *RawNode) newReady() Ready { + return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) +} + +func (rn *RawNode) commitReady(rd Ready) { + if rd.SoftState != nil { + rn.prevSoftSt = rd.SoftState + } + if !IsEmptyHardState(rd.HardState) { + rn.prevHardSt = rd.HardState + } + if rn.prevHardSt.Commit != 0 { + // In most cases, prevHardSt and rd.HardState will be the same + // because when there are new entries to apply we just sent a + // HardState with an updated Commit value. However, on initial + // startup the two are different because we don't send a HardState + // until something changes, but we do send any un-applied but + // committed entries (and previously-committed entries may be + // incorporated into the snapshot, even if rd.CommittedEntries is + // empty). Therefore we mark all committed entries as applied + // whether they were included in rd.HardState or not. + rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit) + } + if len(rd.Entries) > 0 { + e := rd.Entries[len(rd.Entries)-1] + rn.raft.raftLog.stableTo(e.Index, e.Term) + } + if !IsEmptySnap(rd.Snapshot) { + rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) + } + if len(rd.ReadStates) != 0 { + rn.raft.readStates = nil + } +} + +// NewRawNode returns a new RawNode given configuration and a list of raft peers. +func NewRawNode(config *Config, peers []Peer) (*RawNode, error) { + if config.ID == 0 { + panic("config.ID must not be zero") + } + r := newRaft(config) + rn := &RawNode{ + raft: r, + } + lastIndex, err := config.Storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + // If the log is empty, this is a new RawNode (like StartNode); otherwise it's + // restoring an existing RawNode (like RestartNode). + // TODO(bdarnell): rethink RawNode initialization and whether the application needs + // to be able to tell us when it expects the RawNode to exist. + if lastIndex == 0 { + r.becomeFollower(1, None) + ents := make([]pb.Entry, len(peers)) + for i, peer := range peers { + cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} + data, err := cc.Marshal() + if err != nil { + panic("unexpected marshal error") + } + + ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} + } + r.raftLog.append(ents...) 
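+	// Editor's note: a brand-new group is bootstrapped by synthesizing one
+	// ConfChangeAddNode entry per peer at term 1, indexes 1..len(peers);
+	// marking them committed (below) lets the application learn the initial
+	// membership through the normal Ready/apply path rather than a side
+	// channel. A typical driver loop for a RawNode looks like this sketch —
+	// the persist/send/apply steps are placeholders, not names from this
+	// package:
+	_ = func(rn *RawNode) {
+		for rn.HasReady() {
+			rd := rn.Ready()
+			// 1. persist rd.HardState and rd.Entries to stable storage
+			// 2. send rd.Messages to the addressed peers
+			// 3. apply rd.CommittedEntries to the state machine
+			rn.Advance(rd)
+		}
+	}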
+ r.raftLog.committed = uint64(len(ents)) + for _, peer := range peers { + r.addNode(peer.ID) + } + } + + // Set the initial hard and soft states after performing all initialization. + rn.prevSoftSt = r.softState() + if lastIndex == 0 { + rn.prevHardSt = emptyState + } else { + rn.prevHardSt = r.hardState() + } + + return rn, nil +} + +// Tick advances the internal logical clock by a single tick. +func (rn *RawNode) Tick() { + rn.raft.tick() +} + +// TickQuiesced advances the internal logical clock by a single tick without +// performing any other state machine processing. It allows the caller to avoid +// periodic heartbeats and elections when all of the peers in a Raft group are +// known to be at the same state. Expected usage is to periodically invoke Tick +// or TickQuiesced depending on whether the group is "active" or "quiesced". +// +// WARNING: Be very careful about using this method as it subverts the Raft +// state machine. You should probably be using Tick instead. +func (rn *RawNode) TickQuiesced() { + rn.raft.electionElapsed++ +} + +// Campaign causes this RawNode to transition to candidate state. +func (rn *RawNode) Campaign() error { + return rn.raft.Step(pb.Message{ + Type: pb.MsgHup, + }) +} + +// Propose proposes data be appended to the raft log. +func (rn *RawNode) Propose(data []byte) error { + return rn.raft.Step(pb.Message{ + Type: pb.MsgProp, + From: rn.raft.id, + Entries: []pb.Entry{ + {Data: data}, + }}) +} + +// ProposeConfChange proposes a config change. +func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error { + data, err := cc.Marshal() + if err != nil { + return err + } + return rn.raft.Step(pb.Message{ + Type: pb.MsgProp, + Entries: []pb.Entry{ + {Type: pb.EntryConfChange, Data: data}, + }, + }) +} + +// ApplyConfChange applies a config change to the local node. +func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { + if cc.NodeID == None { + rn.raft.resetPendingConf() + return &pb.ConfState{Nodes: rn.raft.nodes()} + } + switch cc.Type { + case pb.ConfChangeAddNode: + rn.raft.addNode(cc.NodeID) + case pb.ConfChangeRemoveNode: + rn.raft.removeNode(cc.NodeID) + case pb.ConfChangeUpdateNode: + rn.raft.resetPendingConf() + default: + panic("unexpected conf type") + } + return &pb.ConfState{Nodes: rn.raft.nodes()} +} + +// Step advances the state machine using the given message. +func (rn *RawNode) Step(m pb.Message) error { + // ignore unexpected local messages receiving over network + if IsLocalMsg(m.Type) { + return ErrStepLocalMsg + } + if _, ok := rn.raft.prs[m.From]; ok || !IsResponseMsg(m.Type) { + return rn.raft.Step(m) + } + return ErrStepPeerNotFound +} + +// Ready returns the current point-in-time state of this RawNode. +func (rn *RawNode) Ready() Ready { + rd := rn.newReady() + rn.raft.msgs = nil + return rd +} + +// HasReady called when RawNode user need to check if any Ready pending. +// Checking logic in this method should be consistent with Ready.containsUpdates(). 
+func (rn *RawNode) HasReady() bool { + r := rn.raft + if !r.softState().equal(rn.prevSoftSt) { + return true + } + if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) { + return true + } + if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) { + return true + } + if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() { + return true + } + if len(r.readStates) != 0 { + return true + } + return false +} + +// Advance notifies the RawNode that the application has applied and saved progress in the +// last Ready results. +func (rn *RawNode) Advance(rd Ready) { + rn.commitReady(rd) +} + +// Status returns the current status of the given group. +func (rn *RawNode) Status() *Status { + status := getStatus(rn.raft) + return &status +} + +// ReportUnreachable reports the given node is not reachable for the last send. +func (rn *RawNode) ReportUnreachable(id uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id}) +} + +// ReportSnapshot reports the status of the sent snapshot. +func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}) +} + +// TransferLeader tries to transfer leadership to the given transferee. +func (rn *RawNode) TransferLeader(transferee uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee}) +} + +// ReadIndex requests a read state. The read state will be set in ready. +// Read State has a read index. Once the application advances further than the read +// index, any linearizable read requests issued before the read request can be +// processed safely. The read state will have the same rctx attached. +func (rn *RawNode) ReadIndex(rctx []byte) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) +} diff --git a/vendor/github.com/coreos/etcd/raft/read_only.go b/vendor/github.com/coreos/etcd/raft/read_only.go new file mode 100644 index 000000000000..d0085237e36f --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/read_only.go @@ -0,0 +1,118 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "github.com/coreos/etcd/raft/raftpb" + +// ReadState provides state for read only query. +// It's caller's responsibility to call ReadIndex first before getting +// this state from ready, It's also caller's duty to differentiate if this +// state is what it requests through RequestCtx, eg. 
given a unique id as +// RequestCtx +type ReadState struct { + Index uint64 + RequestCtx []byte +} + +type readIndexStatus struct { + req pb.Message + index uint64 + acks map[uint64]struct{} +} + +type readOnly struct { + option ReadOnlyOption + pendingReadIndex map[string]*readIndexStatus + readIndexQueue []string +} + +func newReadOnly(option ReadOnlyOption) *readOnly { + return &readOnly{ + option: option, + pendingReadIndex: make(map[string]*readIndexStatus), + } +} + +// addRequest adds a read only reuqest into readonly struct. +// `index` is the commit index of the raft state machine when it received +// the read only request. +// `m` is the original read only request message from the local or remote node. +func (ro *readOnly) addRequest(index uint64, m pb.Message) { + ctx := string(m.Entries[0].Data) + if _, ok := ro.pendingReadIndex[ctx]; ok { + return + } + ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})} + ro.readIndexQueue = append(ro.readIndexQueue, ctx) +} + +// recvAck notifies the readonly struct that the raft state machine received +// an acknowledgment of the heartbeat that attached with the read only request +// context. +func (ro *readOnly) recvAck(m pb.Message) int { + rs, ok := ro.pendingReadIndex[string(m.Context)] + if !ok { + return 0 + } + + rs.acks[m.From] = struct{}{} + // add one to include an ack from local node + return len(rs.acks) + 1 +} + +// advance advances the read only request queue kept by the readonly struct. +// It dequeues the requests until it finds the read only request that has +// the same context as the given `m`. +func (ro *readOnly) advance(m pb.Message) []*readIndexStatus { + var ( + i int + found bool + ) + + ctx := string(m.Context) + rss := []*readIndexStatus{} + + for _, okctx := range ro.readIndexQueue { + i++ + rs, ok := ro.pendingReadIndex[okctx] + if !ok { + panic("cannot find corresponding read state from pending map") + } + rss = append(rss, rs) + if okctx == ctx { + found = true + break + } + } + + if found { + ro.readIndexQueue = ro.readIndexQueue[i:] + for _, rs := range rss { + delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data)) + } + return rss + } + + return nil +} + +// lastPendingRequestCtx returns the context of the last pending read only +// request in readonly struct. +func (ro *readOnly) lastPendingRequestCtx() string { + if len(ro.readIndexQueue) == 0 { + return "" + } + return ro.readIndexQueue[len(ro.readIndexQueue)-1] +} diff --git a/vendor/github.com/coreos/etcd/raft/status.go b/vendor/github.com/coreos/etcd/raft/status.go new file mode 100644 index 000000000000..b690fa56b95e --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/status.go @@ -0,0 +1,76 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
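+// Editor's note on read_only.go above: a linearizable read is keyed by its
+// request context. addRequest records the leader's commit index when the
+// request arrives, recvAck counts heartbeat responses that echo that context
+// (plus one for the leader itself), and once a quorum has acked, advance
+// dequeues the request together with every older pending one — their saved
+// indexes are then safe read indexes.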
+ +package raft + +import ( + "fmt" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +type Status struct { + ID uint64 + + pb.HardState + SoftState + + Applied uint64 + Progress map[uint64]Progress +} + +// getStatus gets a copy of the current raft status. +func getStatus(r *raft) Status { + s := Status{ID: r.id} + s.HardState = r.hardState() + s.SoftState = *r.softState() + + s.Applied = r.raftLog.applied + + if s.RaftState == StateLeader { + s.Progress = make(map[uint64]Progress) + for id, p := range r.prs { + s.Progress[id] = *p + } + } + + return s +} + +// MarshalJSON translates the raft status into JSON. +// TODO: try to simplify this by introducing ID type into raft +func (s Status) MarshalJSON() ([]byte, error) { + j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"progress":{`, + s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState) + + if len(s.Progress) == 0 { + j += "}}" + } else { + for k, v := range s.Progress { + subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State) + j += subj + } + // remove the trailing "," + j = j[:len(j)-1] + "}}" + } + return []byte(j), nil +} + +func (s Status) String() string { + b, err := s.MarshalJSON() + if err != nil { + raftLogger.Panicf("unexpected error: %v", err) + } + return string(b) +} diff --git a/vendor/github.com/coreos/etcd/raft/storage.go b/vendor/github.com/coreos/etcd/raft/storage.go new file mode 100644 index 000000000000..69c3a7d90338 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/storage.go @@ -0,0 +1,271 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + "sync" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +// ErrCompacted is returned by Storage.Entries/Compact when a requested +// index is unavailable because it predates the last snapshot. +var ErrCompacted = errors.New("requested index is unavailable due to compaction") + +// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested +// index is older than the existing snapshot. +var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot") + +// ErrUnavailable is returned by Storage interface when the requested log entries +// are unavailable. +var ErrUnavailable = errors.New("requested entry at index is unavailable") + +// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required +// snapshot is temporarily unavailable. +var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable") + +// Storage is an interface that may be implemented by the application +// to retrieve log entries from storage. +// +// If any Storage method returns an error, the raft instance will +// become inoperable and refuse to participate in elections; the +// application is responsible for cleanup and recovery in this case. 
+type Storage interface { + // InitialState returns the saved HardState and ConfState information. + InitialState() (pb.HardState, pb.ConfState, error) + // Entries returns a slice of log entries in the range [lo,hi). + // MaxSize limits the total size of the log entries returned, but + // Entries returns at least one entry if any. + Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) + // Term returns the term of entry i, which must be in the range + // [FirstIndex()-1, LastIndex()]. The term of the entry before + // FirstIndex is retained for matching purposes even though the + // rest of that entry may not be available. + Term(i uint64) (uint64, error) + // LastIndex returns the index of the last entry in the log. + LastIndex() (uint64, error) + // FirstIndex returns the index of the first log entry that is + // possibly available via Entries (older entries have been incorporated + // into the latest Snapshot; if storage only contains the dummy entry the + // first log entry is not available). + FirstIndex() (uint64, error) + // Snapshot returns the most recent snapshot. + // If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable, + // so raft state machine could know that Storage needs some time to prepare + // snapshot and call Snapshot later. + Snapshot() (pb.Snapshot, error) +} + +// MemoryStorage implements the Storage interface backed by an +// in-memory array. +type MemoryStorage struct { + // Protects access to all fields. Most methods of MemoryStorage are + // run on the raft goroutine, but Append() is run on an application + // goroutine. + sync.Mutex + + hardState pb.HardState + snapshot pb.Snapshot + // ents[i] has raft log position i+snapshot.Metadata.Index + ents []pb.Entry +} + +// NewMemoryStorage creates an empty MemoryStorage. +func NewMemoryStorage() *MemoryStorage { + return &MemoryStorage{ + // When starting from scratch populate the list with a dummy entry at term zero. + ents: make([]pb.Entry, 1), + } +} + +// InitialState implements the Storage interface. +func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) { + return ms.hardState, ms.snapshot.Metadata.ConfState, nil +} + +// SetHardState saves the current HardState. +func (ms *MemoryStorage) SetHardState(st pb.HardState) error { + ms.Lock() + defer ms.Unlock() + ms.hardState = st + return nil +} + +// Entries implements the Storage interface. +func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if lo <= offset { + return nil, ErrCompacted + } + if hi > ms.lastIndex()+1 { + raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex()) + } + // only contains dummy entries. + if len(ms.ents) == 1 { + return nil, ErrUnavailable + } + + ents := ms.ents[lo-offset : hi-offset] + return limitSize(ents, maxSize), nil +} + +// Term implements the Storage interface. +func (ms *MemoryStorage) Term(i uint64) (uint64, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if i < offset { + return 0, ErrCompacted + } + if int(i-offset) >= len(ms.ents) { + return 0, ErrUnavailable + } + return ms.ents[i-offset].Term, nil +} + +// LastIndex implements the Storage interface. 
+func (ms *MemoryStorage) LastIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.lastIndex(), nil +} + +func (ms *MemoryStorage) lastIndex() uint64 { + return ms.ents[0].Index + uint64(len(ms.ents)) - 1 +} + +// FirstIndex implements the Storage interface. +func (ms *MemoryStorage) FirstIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.firstIndex(), nil +} + +func (ms *MemoryStorage) firstIndex() uint64 { + return ms.ents[0].Index + 1 +} + +// Snapshot implements the Storage interface. +func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + return ms.snapshot, nil +} + +// ApplySnapshot overwrites the contents of this Storage object with +// those of the given snapshot. +func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error { + ms.Lock() + defer ms.Unlock() + + //handle check for old snapshot being applied + msIndex := ms.snapshot.Metadata.Index + snapIndex := snap.Metadata.Index + if msIndex >= snapIndex { + return ErrSnapOutOfDate + } + + ms.snapshot = snap + ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}} + return nil +} + +// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and +// can be used to reconstruct the state at that point. +// If any configuration changes have been made since the last compaction, +// the result of the last ApplyConfChange must be passed in. +func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + if i <= ms.snapshot.Metadata.Index { + return pb.Snapshot{}, ErrSnapOutOfDate + } + + offset := ms.ents[0].Index + if i > ms.lastIndex() { + raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex()) + } + + ms.snapshot.Metadata.Index = i + ms.snapshot.Metadata.Term = ms.ents[i-offset].Term + if cs != nil { + ms.snapshot.Metadata.ConfState = *cs + } + ms.snapshot.Data = data + return ms.snapshot, nil +} + +// Compact discards all log entries prior to compactIndex. +// It is the application's responsibility to not attempt to compact an index +// greater than raftLog.applied. +func (ms *MemoryStorage) Compact(compactIndex uint64) error { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if compactIndex <= offset { + return ErrCompacted + } + if compactIndex > ms.lastIndex() { + raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex()) + } + + i := compactIndex - offset + ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i) + ents[0].Index = ms.ents[i].Index + ents[0].Term = ms.ents[i].Term + ents = append(ents, ms.ents[i+1:]...) + ms.ents = ents + return nil +} + +// Append the new entries to storage. +// TODO (xiangli): ensure the entries are continuous and +// entries[0].Index > ms.entries[0].Index +func (ms *MemoryStorage) Append(entries []pb.Entry) error { + if len(entries) == 0 { + return nil + } + + ms.Lock() + defer ms.Unlock() + + first := ms.firstIndex() + last := entries[0].Index + uint64(len(entries)) - 1 + + // shortcut if there is no new entry. + if last < first { + return nil + } + // truncate compacted entries + if first > entries[0].Index { + entries = entries[first-entries[0].Index:] + } + + offset := entries[0].Index - ms.ents[0].Index + switch { + case uint64(len(ms.ents)) > offset: + ms.ents = append([]pb.Entry{}, ms.ents[:offset]...) + ms.ents = append(ms.ents, entries...) + case uint64(len(ms.ents)) == offset: + ms.ents = append(ms.ents, entries...) 
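+	// Editor's note on the three cases: if the new entries overlap our tail
+	// (len(ms.ents) > offset) the tail is truncated and replaced; if they are
+	// exactly contiguous (== offset) they are simply appended; anything else
+	// would leave a hole in the log, which must never happen — hence the
+	// panic below.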
+ default: + raftLogger.Panicf("missing log entry [last: %d, append at: %d]", + ms.lastIndex(), entries[0].Index) + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/raft/util.go b/vendor/github.com/coreos/etcd/raft/util.go new file mode 100644 index 000000000000..f4141fe65ddf --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/util.go @@ -0,0 +1,129 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "bytes" + "fmt" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +func (st StateType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", st.String())), nil +} + +// uint64Slice implements sort interface +type uint64Slice []uint64 + +func (p uint64Slice) Len() int { return len(p) } +func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func IsLocalMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || + msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum +} + +func IsResponseMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp +} + +// voteResponseType maps vote and prevote message types to their corresponding responses. +func voteRespMsgType(msgt pb.MessageType) pb.MessageType { + switch msgt { + case pb.MsgVote: + return pb.MsgVoteResp + case pb.MsgPreVote: + return pb.MsgPreVoteResp + default: + panic(fmt.Sprintf("not a vote message: %s", msgt)) + } +} + +// EntryFormatter can be implemented by the application to provide human-readable formatting +// of entry data. Nil is a valid EntryFormatter and will use a default format. +type EntryFormatter func([]byte) string + +// DescribeMessage returns a concise human-readable description of a +// Message for debugging. +func DescribeMessage(m pb.Message, f EntryFormatter) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) + if m.Reject { + fmt.Fprintf(&buf, " Rejected") + if m.RejectHint != 0 { + fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint) + } + } + if m.Commit != 0 { + fmt.Fprintf(&buf, " Commit:%d", m.Commit) + } + if len(m.Entries) > 0 { + fmt.Fprintf(&buf, " Entries:[") + for i, e := range m.Entries { + if i != 0 { + buf.WriteString(", ") + } + buf.WriteString(DescribeEntry(e, f)) + } + fmt.Fprintf(&buf, "]") + } + if !IsEmptySnap(m.Snapshot) { + fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot) + } + return buf.String() +} + +// DescribeEntry returns a concise human-readable description of an +// Entry for debugging. 
+func DescribeEntry(e pb.Entry, f EntryFormatter) string { + var formatted string + if e.Type == pb.EntryNormal && f != nil { + formatted = f(e.Data) + } else { + formatted = fmt.Sprintf("%q", e.Data) + } + return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted) +} + +func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { + if len(ents) == 0 { + return ents + } + size := ents[0].Size() + var limit int + for limit = 1; limit < len(ents); limit++ { + size += ents[limit].Size() + if uint64(size) > maxSize { + break + } + } + return ents[:limit] +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/BUILD b/vendor/github.com/coreos/etcd/rafthttp/BUILD new file mode 100644 index 000000000000..88e827077837 --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/BUILD @@ -0,0 +1,57 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "coder.go", + "doc.go", + "http.go", + "metrics.go", + "msg_codec.go", + "msgappv2_codec.go", + "peer.go", + "peer_status.go", + "pipeline.go", + "probing_status.go", + "remote.go", + "snapshot_sender.go", + "stream.go", + "transport.go", + "urlpick.go", + "util.go", + ], + importpath = "github.com/coreos/etcd/rafthttp", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/etcdserver/stats:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/ioutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/coreos/etcd/raft:go_default_library", + "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", + "//vendor/github.com/coreos/etcd/snap:go_default_library", + "//vendor/github.com/coreos/etcd/version:go_default_library", + "//vendor/github.com/coreos/go-semver/semver:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/github.com/xiang90/probing:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/rafthttp/coder.go b/vendor/github.com/coreos/etcd/rafthttp/coder.go new file mode 100644 index 000000000000..86ede972e1f1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/coder.go @@ -0,0 +1,27 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import "github.com/coreos/etcd/raft/raftpb" + +type encoder interface { + // encode encodes the given message to an output stream. + encode(m *raftpb.Message) error +} + +type decoder interface { + // decode decodes the message from an input stream. + decode() (raftpb.Message, error) +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/doc.go b/vendor/github.com/coreos/etcd/rafthttp/doc.go new file mode 100644 index 000000000000..a9486a8bb664 --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package rafthttp implements HTTP transportation layer for etcd/raft pkg. +package rafthttp diff --git a/vendor/github.com/coreos/etcd/rafthttp/http.go b/vendor/github.com/coreos/etcd/rafthttp/http.go new file mode 100644 index 000000000000..55df26e9b756 --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/http.go @@ -0,0 +1,359 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "errors" + "fmt" + "io/ioutil" + "net/http" + "path" + "strings" + + pioutil "github.com/coreos/etcd/pkg/ioutil" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" + "github.com/coreos/etcd/version" + "golang.org/x/net/context" +) + +const ( + // connReadLimitByte limits the number of bytes + // a single read can read out. + // + // 64KB should be large enough for not causing + // throughput bottleneck as well as small enough + // for not causing a read timeout. 
+ connReadLimitByte = 64 * 1024 +) + +var ( + RaftPrefix = "/raft" + ProbingPrefix = path.Join(RaftPrefix, "probing") + RaftStreamPrefix = path.Join(RaftPrefix, "stream") + RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot") + + errIncompatibleVersion = errors.New("incompatible version") + errClusterIDMismatch = errors.New("cluster ID mismatch") +) + +type peerGetter interface { + Get(id types.ID) Peer +} + +type writerToResponse interface { + WriteTo(w http.ResponseWriter) +} + +type pipelineHandler struct { + tr Transporter + r Raft + cid types.ID +} + +// newPipelineHandler returns a handler for handling raft messages +// from pipeline for RaftPrefix. +// +// The handler reads out the raft message from request body, +// and forwards it to the given raft state machine for processing. +func newPipelineHandler(tr Transporter, r Raft, cid types.ID) http.Handler { + return &pipelineHandler{ + tr: tr, + r: r, + cid: cid, + } +} + +func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + w.Header().Set("Allow", "POST") + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + + w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) + + if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil { + http.Error(w, err.Error(), http.StatusPreconditionFailed) + return + } + + if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err != nil { + if urls := r.Header.Get("X-PeerURLs"); urls != "" { + h.tr.AddRemote(from, strings.Split(urls, ",")) + } + } + + // Limit the data size that could be read from the request body, which ensures that read from + // connection will not time out accidentally due to possible blocking in underlying implementation. + limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte) + b, err := ioutil.ReadAll(limitedr) + if err != nil { + plog.Errorf("failed to read raft message (%v)", err) + http.Error(w, "error reading raft message", http.StatusBadRequest) + recvFailures.WithLabelValues(r.RemoteAddr).Inc() + return + } + + var m raftpb.Message + if err := m.Unmarshal(b); err != nil { + plog.Errorf("failed to unmarshal raft message (%v)", err) + http.Error(w, "error unmarshaling raft message", http.StatusBadRequest) + recvFailures.WithLabelValues(r.RemoteAddr).Inc() + return + } + + receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(len(b))) + + if err := h.r.Process(context.TODO(), m); err != nil { + switch v := err.(type) { + case writerToResponse: + v.WriteTo(w) + default: + plog.Warningf("failed to process raft message (%v)", err) + http.Error(w, "error processing raft message", http.StatusInternalServerError) + w.(http.Flusher).Flush() + // disconnect the http stream + panic(err) + } + return + } + + // Write StatusNoContent header after the message has been processed by + // raft, which facilitates the client to report MsgSnap status. + w.WriteHeader(http.StatusNoContent) +} + +type snapshotHandler struct { + tr Transporter + r Raft + snapshotter *snap.Snapshotter + cid types.ID +} + +func newSnapshotHandler(tr Transporter, r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler { + return &snapshotHandler{ + tr: tr, + r: r, + snapshotter: snapshotter, + cid: cid, + } +} + +// ServeHTTP serves HTTP request to receive and process snapshot message. 
+// +// If request sender dies without closing underlying TCP connection, +// the handler will keep waiting for the request body until TCP keepalive +// finds out that the connection is broken after several minutes. +// This is acceptable because +// 1. snapshot messages sent through other TCP connections could still be +// received and processed. +// 2. this case should happen rarely, so no further optimization is done. +func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + w.Header().Set("Allow", "POST") + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + + w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) + + if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil { + http.Error(w, err.Error(), http.StatusPreconditionFailed) + return + } + + if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err != nil { + if urls := r.Header.Get("X-PeerURLs"); urls != "" { + h.tr.AddRemote(from, strings.Split(urls, ",")) + } + } + + dec := &messageDecoder{r: r.Body} + // let snapshots be very large since they can exceed 512MB for large installations + m, err := dec.decodeLimit(uint64(1 << 63)) + if err != nil { + msg := fmt.Sprintf("failed to decode raft message (%v)", err) + plog.Errorf(msg) + http.Error(w, msg, http.StatusBadRequest) + recvFailures.WithLabelValues(r.RemoteAddr).Inc() + return + } + + receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size())) + + if m.Type != raftpb.MsgSnap { + plog.Errorf("unexpected raft message type %s on snapshot path", m.Type) + http.Error(w, "wrong raft message type", http.StatusBadRequest) + return + } + + plog.Infof("receiving database snapshot [index:%d, from %s] ...", m.Snapshot.Metadata.Index, types.ID(m.From)) + // save incoming database snapshot. + n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index) + if err != nil { + msg := fmt.Sprintf("failed to save KV snapshot (%v)", err) + plog.Error(msg) + http.Error(w, msg, http.StatusInternalServerError) + return + } + receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(n)) + plog.Infof("received and saved database snapshot [index: %d, from: %s] successfully", m.Snapshot.Metadata.Index, types.ID(m.From)) + + if err := h.r.Process(context.TODO(), m); err != nil { + switch v := err.(type) { + // Process may return writerToResponse error when doing some + // additional checks before calling raft.Node.Step. + case writerToResponse: + v.WriteTo(w) + default: + msg := fmt.Sprintf("failed to process raft message (%v)", err) + plog.Warningf(msg) + http.Error(w, msg, http.StatusInternalServerError) + } + return + } + // Write StatusNoContent header after the message has been processed by + // raft, which facilitates the client to report MsgSnap status. 
+ w.WriteHeader(http.StatusNoContent) +} + +type streamHandler struct { + tr *Transport + peerGetter peerGetter + r Raft + id types.ID + cid types.ID +} + +func newStreamHandler(tr *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler { + return &streamHandler{ + tr: tr, + peerGetter: pg, + r: r, + id: id, + cid: cid, + } +} + +func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + w.Header().Set("Allow", "GET") + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + + w.Header().Set("X-Server-Version", version.Version) + w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) + + if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil { + http.Error(w, err.Error(), http.StatusPreconditionFailed) + return + } + + var t streamType + switch path.Dir(r.URL.Path) { + case streamTypeMsgAppV2.endpoint(): + t = streamTypeMsgAppV2 + case streamTypeMessage.endpoint(): + t = streamTypeMessage + default: + plog.Debugf("ignored unexpected streaming request path %s", r.URL.Path) + http.Error(w, "invalid path", http.StatusNotFound) + return + } + + fromStr := path.Base(r.URL.Path) + from, err := types.IDFromString(fromStr) + if err != nil { + plog.Errorf("failed to parse from %s into ID (%v)", fromStr, err) + http.Error(w, "invalid from", http.StatusNotFound) + return + } + if h.r.IsIDRemoved(uint64(from)) { + plog.Warningf("rejected the stream from peer %s since it was removed", from) + http.Error(w, "removed member", http.StatusGone) + return + } + p := h.peerGetter.Get(from) + if p == nil { + // This may happen in following cases: + // 1. user starts a remote peer that belongs to a different cluster + // with the same cluster ID. + // 2. local etcd falls behind of the cluster, and cannot recognize + // the members that joined after its current progress. + if urls := r.Header.Get("X-PeerURLs"); urls != "" { + h.tr.AddRemote(from, strings.Split(urls, ",")) + } + plog.Errorf("failed to find member %s in cluster %s", from, h.cid) + http.Error(w, "error sender not found", http.StatusNotFound) + return + } + + wto := h.id.String() + if gto := r.Header.Get("X-Raft-To"); gto != wto { + plog.Errorf("streaming request ignored (ID mismatch got %s want %s)", gto, wto) + http.Error(w, "to field mismatch", http.StatusPreconditionFailed) + return + } + + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + + c := newCloseNotifier() + conn := &outgoingConn{ + t: t, + Writer: w, + Flusher: w.(http.Flusher), + Closer: c, + } + p.attachOutgoingConn(conn) + <-c.closeNotify() +} + +// checkClusterCompatibilityFromHeader checks the cluster compatibility of +// the local member from the given header. +// It checks whether the version of local member is compatible with +// the versions in the header, and whether the cluster ID of local member +// matches the one in the header. 
+func checkClusterCompatibilityFromHeader(header http.Header, cid types.ID) error { + if err := checkVersionCompability(header.Get("X-Server-From"), serverVersion(header), minClusterVersion(header)); err != nil { + plog.Errorf("request version incompatibility (%v)", err) + return errIncompatibleVersion + } + if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() { + plog.Errorf("request cluster ID mismatch (got %s want %s)", gcid, cid) + return errClusterIDMismatch + } + return nil +} + +type closeNotifier struct { + done chan struct{} +} + +func newCloseNotifier() *closeNotifier { + return &closeNotifier{ + done: make(chan struct{}), + } +} + +func (n *closeNotifier) Close() error { + close(n.done) + return nil +} + +func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done } diff --git a/vendor/github.com/coreos/etcd/rafthttp/metrics.go b/vendor/github.com/coreos/etcd/rafthttp/metrics.go new file mode 100644 index 000000000000..320bfe72661d --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/metrics.go @@ -0,0 +1,73 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import "github.com/prometheus/client_golang/prometheus" + +var ( + sentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "network", + Name: "peer_sent_bytes_total", + Help: "The total number of bytes sent to peers.", + }, + []string{"To"}, + ) + + receivedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "network", + Name: "peer_received_bytes_total", + Help: "The total number of bytes received from peers.", + }, + []string{"From"}, + ) + + sentFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "network", + Name: "peer_sent_failures_total", + Help: "The total number of send failures from peers.", + }, + []string{"To"}, + ) + + recvFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "network", + Name: "peer_received_failures_total", + Help: "The total number of receive failures from peers.", + }, + []string{"From"}, + ) + + rtts = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "network", + Name: "peer_round_trip_time_seconds", + Help: "Round-Trip-Time histogram between peers.", + Buckets: prometheus.ExponentialBuckets(0.0001, 2, 14), + }, + []string{"To"}, + ) +) + +func init() { + prometheus.MustRegister(sentBytes) + prometheus.MustRegister(receivedBytes) + prometheus.MustRegister(sentFailures) + prometheus.MustRegister(recvFailures) + prometheus.MustRegister(rtts) +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go b/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go new file mode 100644 index 000000000000..ef59bc8883f6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go @@ -0,0 +1,68 @@ +// Copyright 2015 The etcd Authors +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/raft/raftpb" +) + +// messageEncoder is a encoder that can encode all kinds of messages. +// It MUST be used with a paired messageDecoder. +type messageEncoder struct { + w io.Writer +} + +func (enc *messageEncoder) encode(m *raftpb.Message) error { + if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil { + return err + } + _, err := enc.w.Write(pbutil.MustMarshal(m)) + return err +} + +// messageDecoder is a decoder that can decode all kinds of messages. +type messageDecoder struct { + r io.Reader +} + +var ( + readBytesLimit uint64 = 512 * 1024 * 1024 // 512 MB + ErrExceedSizeLimit = errors.New("rafthttp: error limit exceeded") +) + +func (dec *messageDecoder) decode() (raftpb.Message, error) { + return dec.decodeLimit(readBytesLimit) +} + +func (dec *messageDecoder) decodeLimit(numBytes uint64) (raftpb.Message, error) { + var m raftpb.Message + var l uint64 + if err := binary.Read(dec.r, binary.BigEndian, &l); err != nil { + return m, err + } + if l > numBytes { + return m, ErrExceedSizeLimit + } + buf := make([]byte, int(l)) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return m, err + } + return m, m.Unmarshal(buf) +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go b/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go new file mode 100644 index 000000000000..013ffe7c7362 --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go @@ -0,0 +1,248 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "encoding/binary" + "fmt" + "io" + "time" + + "github.com/coreos/etcd/etcdserver/stats" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft/raftpb" +) + +const ( + msgTypeLinkHeartbeat uint8 = 0 + msgTypeAppEntries uint8 = 1 + msgTypeApp uint8 = 2 + + msgAppV2BufSize = 1024 * 1024 +) + +// msgappv2 stream sends three types of message: linkHeartbeatMessage, +// AppEntries and MsgApp. AppEntries is the MsgApp that is sent in +// replicate state in raft, whose index and term are fully predictable. 
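+// Because the receiver tracks the expected index and term itself, AppEntries omits them from the wire format, so each append avoids re-encoding the full raftpb.Message envelope.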
+// +// Data format of linkHeartbeatMessage: +// | offset | bytes | description | +// +--------+-------+-------------+ +// | 0 | 1 | \x00 | +// +// Data format of AppEntries: +// | offset | bytes | description | +// +--------+-------+-------------+ +// | 0 | 1 | \x01 | +// | 1 | 8 | length of entries | +// | 9 | 8 | length of first entry | +// | 17 | n1 | first entry | +// ... +// | x | 8 | length of k-th entry data | +// | x+8 | nk | k-th entry data | +// | x+8+nk | 8 | commit index | +// +// Data format of MsgApp: +// | offset | bytes | description | +// +--------+-------+-------------+ +// | 0 | 1 | \x02 | +// | 1 | 8 | length of encoded message | +// | 9 | n | encoded message | +type msgAppV2Encoder struct { + w io.Writer + fs *stats.FollowerStats + + term uint64 + index uint64 + buf []byte + uint64buf []byte + uint8buf []byte +} + +func newMsgAppV2Encoder(w io.Writer, fs *stats.FollowerStats) *msgAppV2Encoder { + return &msgAppV2Encoder{ + w: w, + fs: fs, + buf: make([]byte, msgAppV2BufSize), + uint64buf: make([]byte, 8), + uint8buf: make([]byte, 1), + } +} + +func (enc *msgAppV2Encoder) encode(m *raftpb.Message) error { + start := time.Now() + switch { + case isLinkHeartbeatMessage(m): + enc.uint8buf[0] = byte(msgTypeLinkHeartbeat) + if _, err := enc.w.Write(enc.uint8buf); err != nil { + return err + } + case enc.index == m.Index && enc.term == m.LogTerm && m.LogTerm == m.Term: + enc.uint8buf[0] = byte(msgTypeAppEntries) + if _, err := enc.w.Write(enc.uint8buf); err != nil { + return err + } + // write length of entries + binary.BigEndian.PutUint64(enc.uint64buf, uint64(len(m.Entries))) + if _, err := enc.w.Write(enc.uint64buf); err != nil { + return err + } + for i := 0; i < len(m.Entries); i++ { + // write length of entry + binary.BigEndian.PutUint64(enc.uint64buf, uint64(m.Entries[i].Size())) + if _, err := enc.w.Write(enc.uint64buf); err != nil { + return err + } + if n := m.Entries[i].Size(); n < msgAppV2BufSize { + if _, err := m.Entries[i].MarshalTo(enc.buf); err != nil { + return err + } + if _, err := enc.w.Write(enc.buf[:n]); err != nil { + return err + } + } else { + if _, err := enc.w.Write(pbutil.MustMarshal(&m.Entries[i])); err != nil { + return err + } + } + enc.index++ + } + // write commit index + binary.BigEndian.PutUint64(enc.uint64buf, m.Commit) + if _, err := enc.w.Write(enc.uint64buf); err != nil { + return err + } + enc.fs.Succ(time.Since(start)) + default: + if err := binary.Write(enc.w, binary.BigEndian, msgTypeApp); err != nil { + return err + } + // write size of message + if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil { + return err + } + // write message + if _, err := enc.w.Write(pbutil.MustMarshal(m)); err != nil { + return err + } + + enc.term = m.Term + enc.index = m.Index + if l := len(m.Entries); l > 0 { + enc.index = m.Entries[l-1].Index + } + enc.fs.Succ(time.Since(start)) + } + return nil +} + +type msgAppV2Decoder struct { + r io.Reader + local, remote types.ID + + term uint64 + index uint64 + buf []byte + uint64buf []byte + uint8buf []byte +} + +func newMsgAppV2Decoder(r io.Reader, local, remote types.ID) *msgAppV2Decoder { + return &msgAppV2Decoder{ + r: r, + local: local, + remote: remote, + buf: make([]byte, msgAppV2BufSize), + uint64buf: make([]byte, 8), + uint8buf: make([]byte, 1), + } +} + +func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) { + var ( + m raftpb.Message + typ uint8 + ) + if _, err := io.ReadFull(dec.r, dec.uint8buf); err != nil { + return m, err + } + typ = uint8(dec.uint8buf[0]) + 
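+ // dispatch on the one-byte frame type described in the wire-format comment above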
switch typ { + case msgTypeLinkHeartbeat: + return linkHeartbeatMessage, nil + case msgTypeAppEntries: + m = raftpb.Message{ + Type: raftpb.MsgApp, + From: uint64(dec.remote), + To: uint64(dec.local), + Term: dec.term, + LogTerm: dec.term, + Index: dec.index, + } + + // decode entries + if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil { + return m, err + } + l := binary.BigEndian.Uint64(dec.uint64buf) + m.Entries = make([]raftpb.Entry, int(l)) + for i := 0; i < int(l); i++ { + if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil { + return m, err + } + size := binary.BigEndian.Uint64(dec.uint64buf) + var buf []byte + if size < msgAppV2BufSize { + buf = dec.buf[:size] + if _, err := io.ReadFull(dec.r, buf); err != nil { + return m, err + } + } else { + buf = make([]byte, int(size)) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return m, err + } + } + dec.index++ + // 1 alloc + pbutil.MustUnmarshal(&m.Entries[i], buf) + } + // decode commit index + if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil { + return m, err + } + m.Commit = binary.BigEndian.Uint64(dec.uint64buf) + case msgTypeApp: + var size uint64 + if err := binary.Read(dec.r, binary.BigEndian, &size); err != nil { + return m, err + } + buf := make([]byte, int(size)) + if _, err := io.ReadFull(dec.r, buf); err != nil { + return m, err + } + pbutil.MustUnmarshal(&m, buf) + + dec.term = m.Term + dec.index = m.Index + if l := len(m.Entries); l > 0 { + dec.index = m.Entries[l-1].Index + } + default: + return m, fmt.Errorf("failed to parse type %d in msgappv2 stream", typ) + } + return m, nil +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/peer.go b/vendor/github.com/coreos/etcd/rafthttp/peer.go new file mode 100644 index 000000000000..a82d7beed74d --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/peer.go @@ -0,0 +1,307 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "sync" + "time" + + "github.com/coreos/etcd/etcdserver/stats" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" + "golang.org/x/net/context" +) + +const ( + // ConnReadTimeout and ConnWriteTimeout are the i/o timeout set on each connection rafthttp pkg creates. + // A 5 seconds timeout is good enough for recycling bad connections. Or we have to wait for + // tcp keepalive failing to detect a bad connection, which is at minutes level. + // For long term streaming connections, rafthttp pkg sends application level linkHeartbeatMessage + // to keep the connection alive. + // For short term pipeline connections, the connection MUST be killed to avoid it being + // put back to http pkg connection pool. + ConnReadTimeout = 5 * time.Second + ConnWriteTimeout = 5 * time.Second + + recvBufSize = 4096 + // maxPendingProposals holds the proposals during one leader election process. 
+ // Generally one leader election takes at most 1 sec. It should have + // 0-2 election conflicts, and each one takes 0.5 sec. + // We assume the number of concurrent proposers is smaller than 4096. + // One client blocks on its proposal for at least 1 sec, so 4096 is enough + // to hold all proposals. + maxPendingProposals = 4096 + + streamAppV2 = "streamMsgAppV2" + streamMsg = "streamMsg" + pipelineMsg = "pipeline" + sendSnap = "sendMsgSnap" +) + +type Peer interface { + // send sends the message to the remote peer. The function is non-blocking + // and has no promise that the message will be received by the remote. + // When it fails to send message out, it will report the status to underlying + // raft. + send(m raftpb.Message) + + // sendSnap sends the merged snapshot message to the remote peer. Its behavior + // is similar to send. + sendSnap(m snap.Message) + + // update updates the urls of remote peer. + update(urls types.URLs) + + // attachOutgoingConn attaches the outgoing connection to the peer for + // stream usage. After the call, the ownership of the outgoing + // connection hands over to the peer. The peer will close the connection + // when it is no longer used. + attachOutgoingConn(conn *outgoingConn) + // activeSince returns the time that the connection with the + // peer becomes active. + activeSince() time.Time + // stop performs any necessary finalization and terminates the peer + // elegantly. + stop() +} + +// peer is the representative of a remote raft node. Local raft node sends +// messages to the remote through peer. +// Each peer has two underlying mechanisms to send out a message: stream and +// pipeline. +// A stream is a receiver initialized long-polling connection, which +// is always open to transfer messages. Besides general stream, peer also has +// a optimized stream for sending msgApp since msgApp accounts for large part +// of all messages. Only raft leader uses the optimized stream to send msgApp +// to the remote follower node. +// A pipeline is a series of http clients that send http requests to the remote. +// It is only used when the stream has not been established. +type peer struct { + // id of the remote raft peer node + id types.ID + r Raft + + status *peerStatus + + picker *urlPicker + + msgAppV2Writer *streamWriter + writer *streamWriter + pipeline *pipeline + snapSender *snapshotSender // snapshot sender to send v3 snapshot messages + msgAppV2Reader *streamReader + msgAppReader *streamReader + + recvc chan raftpb.Message + propc chan raftpb.Message + + mu sync.Mutex + paused bool + + cancel context.CancelFunc // cancel pending works in go routine created by peer. 
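+ // stopc is closed by stop() to end the receive and propose goroutines started in startPeer.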
+ stopc chan struct{} +} + +func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer { + plog.Infof("starting peer %s...", peerID) + defer plog.Infof("started peer %s", peerID) + + status := newPeerStatus(peerID) + picker := newURLPicker(urls) + errorc := transport.ErrorC + r := transport.Raft + pipeline := &pipeline{ + peerID: peerID, + tr: transport, + picker: picker, + status: status, + followerStats: fs, + raft: r, + errorc: errorc, + } + pipeline.start() + + p := &peer{ + id: peerID, + r: r, + status: status, + picker: picker, + msgAppV2Writer: startStreamWriter(peerID, status, fs, r), + writer: startStreamWriter(peerID, status, fs, r), + pipeline: pipeline, + snapSender: newSnapshotSender(transport, picker, peerID, status), + recvc: make(chan raftpb.Message, recvBufSize), + propc: make(chan raftpb.Message, maxPendingProposals), + stopc: make(chan struct{}), + } + + ctx, cancel := context.WithCancel(context.Background()) + p.cancel = cancel + go func() { + for { + select { + case mm := <-p.recvc: + if err := r.Process(ctx, mm); err != nil { + plog.Warningf("failed to process raft message (%v)", err) + } + case <-p.stopc: + return + } + } + }() + + // r.Process might block for processing proposal when there is no leader. + // Thus propc must be put into a separate routine with recvc to avoid blocking + // processing other raft messages. + go func() { + for { + select { + case mm := <-p.propc: + if err := r.Process(ctx, mm); err != nil { + plog.Warningf("failed to process raft message (%v)", err) + } + case <-p.stopc: + return + } + } + }() + + p.msgAppV2Reader = &streamReader{ + peerID: peerID, + typ: streamTypeMsgAppV2, + tr: transport, + picker: picker, + status: status, + recvc: p.recvc, + propc: p.propc, + } + p.msgAppReader = &streamReader{ + peerID: peerID, + typ: streamTypeMessage, + tr: transport, + picker: picker, + status: status, + recvc: p.recvc, + propc: p.propc, + } + p.msgAppV2Reader.start() + p.msgAppReader.start() + + return p +} + +func (p *peer) send(m raftpb.Message) { + p.mu.Lock() + paused := p.paused + p.mu.Unlock() + + if paused { + return + } + + writec, name := p.pick(m) + select { + case writec <- m: + default: + p.r.ReportUnreachable(m.To) + if isMsgSnap(m) { + p.r.ReportSnapshot(m.To, raft.SnapshotFailure) + } + if p.status.isActive() { + plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name) + } + plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name) + } +} + +func (p *peer) sendSnap(m snap.Message) { + go p.snapSender.send(m) +} + +func (p *peer) update(urls types.URLs) { + p.picker.update(urls) +} + +func (p *peer) attachOutgoingConn(conn *outgoingConn) { + var ok bool + switch conn.t { + case streamTypeMsgAppV2: + ok = p.msgAppV2Writer.attach(conn) + case streamTypeMessage: + ok = p.writer.attach(conn) + default: + plog.Panicf("unhandled stream type %s", conn.t) + } + if !ok { + conn.Close() + } +} + +func (p *peer) activeSince() time.Time { return p.status.activeSince() } + +// Pause pauses the peer. The peer will simply drops all incoming +// messages without returning an error. +func (p *peer) Pause() { + p.mu.Lock() + defer p.mu.Unlock() + p.paused = true + p.msgAppReader.pause() + p.msgAppV2Reader.pause() +} + +// Resume resumes a paused peer. 
+func (p *peer) Resume() { + p.mu.Lock() + defer p.mu.Unlock() + p.paused = false + p.msgAppReader.resume() + p.msgAppV2Reader.resume() +} + +func (p *peer) stop() { + plog.Infof("stopping peer %s...", p.id) + defer plog.Infof("stopped peer %s", p.id) + + close(p.stopc) + p.cancel() + p.msgAppV2Writer.stop() + p.writer.stop() + p.pipeline.stop() + p.snapSender.stop() + p.msgAppV2Reader.stop() + p.msgAppReader.stop() +} + +// pick picks a chan for sending the given message. The picked chan and the picked chan +// string name are returned. +func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) { + var ok bool + // Considering MsgSnap may have a big size, e.g., 1G, and will block + // stream for a long time, only use one of the N pipelines to send MsgSnap. + if isMsgSnap(m) { + return p.pipeline.msgc, pipelineMsg + } else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) { + return writec, streamAppV2 + } else if writec, ok = p.writer.writec(); ok { + return writec, streamMsg + } + return p.pipeline.msgc, pipelineMsg +} + +func isMsgApp(m raftpb.Message) bool { return m.Type == raftpb.MsgApp } + +func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap } diff --git a/vendor/github.com/coreos/etcd/rafthttp/peer_status.go b/vendor/github.com/coreos/etcd/rafthttp/peer_status.go new file mode 100644 index 000000000000..706144f6466d --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/peer_status.go @@ -0,0 +1,77 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rafthttp + +import ( + "fmt" + "sync" + "time" + + "github.com/coreos/etcd/pkg/types" +) + +type failureType struct { + source string + action string +} + +type peerStatus struct { + id types.ID + mu sync.Mutex // protect variables below + active bool + since time.Time +} + +func newPeerStatus(id types.ID) *peerStatus { + return &peerStatus{ + id: id, + } +} + +func (s *peerStatus) activate() { + s.mu.Lock() + defer s.mu.Unlock() + if !s.active { + plog.Infof("peer %s became active", s.id) + s.active = true + s.since = time.Now() + } +} + +func (s *peerStatus) deactivate(failure failureType, reason string) { + s.mu.Lock() + defer s.mu.Unlock() + msg := fmt.Sprintf("failed to %s %s on %s (%s)", failure.action, s.id, failure.source, reason) + if s.active { + plog.Errorf(msg) + plog.Infof("peer %s became inactive", s.id) + s.active = false + s.since = time.Time{} + return + } + plog.Debugf(msg) +} + +func (s *peerStatus) isActive() bool { + s.mu.Lock() + defer s.mu.Unlock() + return s.active +} + +func (s *peerStatus) activeSince() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.since +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go new file mode 100644 index 000000000000..d9f07c3479d9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go @@ -0,0 +1,160 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "bytes" + "context" + "errors" + "io/ioutil" + "sync" + "time" + + "github.com/coreos/etcd/etcdserver/stats" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" +) + +const ( + connPerPipeline = 4 + // pipelineBufSize is the size of pipeline buffer, which helps hold the + // temporary network latency. + // The size ensures that pipeline does not drop messages when the network + // is out of work for less than 1 second in good path. 
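+ // The buffer is drained by the connPerPipeline sender goroutines started in pipeline.start.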
+ pipelineBufSize = 64 +) + +var errStopped = errors.New("stopped") + +type pipeline struct { + peerID types.ID + + tr *Transport + picker *urlPicker + status *peerStatus + raft Raft + errorc chan error + // deprecate when we depercate v2 API + followerStats *stats.FollowerStats + + msgc chan raftpb.Message + // wait for the handling routines + wg sync.WaitGroup + stopc chan struct{} +} + +func (p *pipeline) start() { + p.stopc = make(chan struct{}) + p.msgc = make(chan raftpb.Message, pipelineBufSize) + p.wg.Add(connPerPipeline) + for i := 0; i < connPerPipeline; i++ { + go p.handle() + } + plog.Infof("started HTTP pipelining with peer %s", p.peerID) +} + +func (p *pipeline) stop() { + close(p.stopc) + p.wg.Wait() + plog.Infof("stopped HTTP pipelining with peer %s", p.peerID) +} + +func (p *pipeline) handle() { + defer p.wg.Done() + + for { + select { + case m := <-p.msgc: + start := time.Now() + err := p.post(pbutil.MustMarshal(&m)) + end := time.Now() + + if err != nil { + p.status.deactivate(failureType{source: pipelineMsg, action: "write"}, err.Error()) + + if m.Type == raftpb.MsgApp && p.followerStats != nil { + p.followerStats.Fail() + } + p.raft.ReportUnreachable(m.To) + if isMsgSnap(m) { + p.raft.ReportSnapshot(m.To, raft.SnapshotFailure) + } + sentFailures.WithLabelValues(types.ID(m.To).String()).Inc() + continue + } + + p.status.activate() + if m.Type == raftpb.MsgApp && p.followerStats != nil { + p.followerStats.Succ(end.Sub(start)) + } + if isMsgSnap(m) { + p.raft.ReportSnapshot(m.To, raft.SnapshotFinish) + } + sentBytes.WithLabelValues(types.ID(m.To).String()).Add(float64(m.Size())) + case <-p.stopc: + return + } + } +} + +// post POSTs a data payload to a url. Returns nil if the POST succeeds, +// error on any failure. +func (p *pipeline) post(data []byte) (err error) { + u := p.picker.pick() + req := createPostRequest(u, RaftPrefix, bytes.NewBuffer(data), "application/protobuf", p.tr.URLs, p.tr.ID, p.tr.ClusterID) + + done := make(chan struct{}, 1) + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) + go func() { + select { + case <-done: + case <-p.stopc: + waitSchedule() + cancel() + } + }() + + resp, err := p.tr.pipelineRt.RoundTrip(req) + done <- struct{}{} + if err != nil { + p.picker.unreachable(u) + return err + } + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + p.picker.unreachable(u) + return err + } + resp.Body.Close() + + err = checkPostResponse(resp, b, req, p.peerID) + if err != nil { + p.picker.unreachable(u) + // errMemberRemoved is a critical error since a removed member should + // always be stopped. So we use reportCriticalError to report it to errorc. + if err == errMemberRemoved { + reportCriticalError(err, p.errorc) + } + return err + } + + return nil +} + +// waitSchedule waits other goroutines to be scheduled for a while +func waitSchedule() { time.Sleep(time.Millisecond) } diff --git a/vendor/github.com/coreos/etcd/rafthttp/probing_status.go b/vendor/github.com/coreos/etcd/rafthttp/probing_status.go new file mode 100644 index 000000000000..c7a3c7ab9318 --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/probing_status.go @@ -0,0 +1,67 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "time" + + "github.com/xiang90/probing" +) + +var ( + // proberInterval must be shorter than read timeout. + // Or the connection will time-out. + proberInterval = ConnReadTimeout - time.Second + statusMonitoringInterval = 30 * time.Second + statusErrorInterval = 5 * time.Second +) + +func addPeerToProber(p probing.Prober, id string, us []string) { + hus := make([]string, len(us)) + for i := range us { + hus[i] = us[i] + ProbingPrefix + } + + p.AddHTTP(id, proberInterval, hus) + + s, err := p.Status(id) + if err != nil { + plog.Errorf("failed to add peer %s into prober", id) + } else { + go monitorProbingStatus(s, id) + } +} + +func monitorProbingStatus(s probing.Status, id string) { + // set the first interval short to log error early. + interval := statusErrorInterval + for { + select { + case <-time.After(interval): + if !s.Health() { + plog.Warningf("health check for peer %s could not connect: %v", id, s.Err()) + interval = statusErrorInterval + } else { + interval = statusMonitoringInterval + } + if s.ClockDiff() > time.Second { + plog.Warningf("the clock difference against peer %s is too high [%v > %v]", id, s.ClockDiff(), time.Second) + } + rtts.WithLabelValues(id).Observe(s.SRTT().Seconds()) + case <-s.StopNotify(): + return + } + } +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/remote.go b/vendor/github.com/coreos/etcd/rafthttp/remote.go new file mode 100644 index 000000000000..c62c818235ad --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/remote.go @@ -0,0 +1,69 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rafthttp + +import ( + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft/raftpb" +) + +type remote struct { + id types.ID + status *peerStatus + pipeline *pipeline +} + +func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote { + picker := newURLPicker(urls) + status := newPeerStatus(id) + pipeline := &pipeline{ + peerID: id, + tr: tr, + picker: picker, + status: status, + raft: tr.Raft, + errorc: tr.ErrorC, + } + pipeline.start() + + return &remote{ + id: id, + status: status, + pipeline: pipeline, + } +} + +func (g *remote) send(m raftpb.Message) { + select { + case g.pipeline.msgc <- m: + default: + if g.status.isActive() { + plog.MergeWarningf("dropped internal raft message to %s since sending buffer is full (bad/overloaded network)", g.id) + } + plog.Debugf("dropped %s to %s since sending buffer is full", m.Type, g.id) + } +} + +func (g *remote) stop() { + g.pipeline.stop() +} + +func (g *remote) Pause() { + g.stop() +} + +func (g *remote) Resume() { + g.pipeline.start() +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go new file mode 100644 index 000000000000..52273c9d195e --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go @@ -0,0 +1,157 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rafthttp + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "net/http" + "time" + + "github.com/coreos/etcd/pkg/httputil" + pioutil "github.com/coreos/etcd/pkg/ioutil" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/snap" +) + +var ( + // timeout for reading snapshot response body + snapResponseReadTimeout = 5 * time.Second +) + +type snapshotSender struct { + from, to types.ID + cid types.ID + + tr *Transport + picker *urlPicker + status *peerStatus + r Raft + errorc chan error + + stopc chan struct{} +} + +func newSnapshotSender(tr *Transport, picker *urlPicker, to types.ID, status *peerStatus) *snapshotSender { + return &snapshotSender{ + from: tr.ID, + to: to, + cid: tr.ClusterID, + tr: tr, + picker: picker, + status: status, + r: tr.Raft, + errorc: tr.ErrorC, + stopc: make(chan struct{}), + } +} + +func (s *snapshotSender) stop() { close(s.stopc) } + +func (s *snapshotSender) send(merged snap.Message) { + m := merged.Message + + body := createSnapBody(merged) + defer body.Close() + + u := s.picker.pick() + req := createPostRequest(u, RaftSnapshotPrefix, body, "application/octet-stream", s.tr.URLs, s.from, s.cid) + + plog.Infof("start to send database snapshot [index: %d, to %s]...", m.Snapshot.Metadata.Index, types.ID(m.To)) + + err := s.post(req) + defer merged.CloseWithError(err) + if err != nil { + plog.Warningf("database snapshot [index: %d, to: %s] failed to be sent out (%v)", m.Snapshot.Metadata.Index, types.ID(m.To), err) + + // errMemberRemoved is a critical error since a removed member should + // always be stopped. So we use reportCriticalError to report it to errorc. + if err == errMemberRemoved { + reportCriticalError(err, s.errorc) + } + + s.picker.unreachable(u) + s.status.deactivate(failureType{source: sendSnap, action: "post"}, err.Error()) + s.r.ReportUnreachable(m.To) + // report SnapshotFailure to raft state machine. After raft state + // machine knows about it, it would pause a while and retry sending + // new snapshot message. + s.r.ReportSnapshot(m.To, raft.SnapshotFailure) + sentFailures.WithLabelValues(types.ID(m.To).String()).Inc() + return + } + s.status.activate() + s.r.ReportSnapshot(m.To, raft.SnapshotFinish) + plog.Infof("database snapshot [index: %d, to: %s] sent out successfully", m.Snapshot.Metadata.Index, types.ID(m.To)) + + sentBytes.WithLabelValues(types.ID(m.To).String()).Add(float64(merged.TotalSize)) +} + +// post posts the given request. +// It returns nil when request is sent out and processed successfully. +func (s *snapshotSender) post(req *http.Request) (err error) { + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) + defer cancel() + + type responseAndError struct { + resp *http.Response + body []byte + err error + } + result := make(chan responseAndError, 1) + + go func() { + resp, err := s.tr.pipelineRt.RoundTrip(req) + if err != nil { + result <- responseAndError{resp, nil, err} + return + } + + // close the response body when timeouts. + // prevents from reading the body forever when the other side dies right after + // successfully receives the request body. 
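+ // Closing the body after snapResponseReadTimeout also unblocks the ioutil.ReadAll below if the peer never finishes responding.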
+ time.AfterFunc(snapResponseReadTimeout, func() { httputil.GracefulClose(resp) }) + body, err := ioutil.ReadAll(resp.Body) + result <- responseAndError{resp, body, err} + }() + + select { + case <-s.stopc: + return errStopped + case r := <-result: + if r.err != nil { + return r.err + } + return checkPostResponse(r.resp, r.body, req, s.to) + } +} + +func createSnapBody(merged snap.Message) io.ReadCloser { + buf := new(bytes.Buffer) + enc := &messageEncoder{w: buf} + // encode raft message + if err := enc.encode(&merged.Message); err != nil { + plog.Panicf("encode message error (%v)", err) + } + + return &pioutil.ReaderAndCloser{ + Reader: io.MultiReader(buf, merged.ReadCloser), + Closer: merged.ReadCloser, + } +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/stream.go b/vendor/github.com/coreos/etcd/rafthttp/stream.go new file mode 100644 index 000000000000..2a6c620f56dc --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/stream.go @@ -0,0 +1,527 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "path" + "strings" + "sync" + "time" + + "github.com/coreos/etcd/etcdserver/stats" + "github.com/coreos/etcd/pkg/httputil" + "github.com/coreos/etcd/pkg/transport" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/version" + "github.com/coreos/go-semver/semver" +) + +const ( + streamTypeMessage streamType = "message" + streamTypeMsgAppV2 streamType = "msgappv2" + + streamBufSize = 4096 +) + +var ( + errUnsupportedStreamType = fmt.Errorf("unsupported stream type") + + // the key is in string format "major.minor.patch" + supportedStream = map[string][]streamType{ + "2.0.0": {}, + "2.1.0": {streamTypeMsgAppV2, streamTypeMessage}, + "2.2.0": {streamTypeMsgAppV2, streamTypeMessage}, + "2.3.0": {streamTypeMsgAppV2, streamTypeMessage}, + "3.0.0": {streamTypeMsgAppV2, streamTypeMessage}, + "3.1.0": {streamTypeMsgAppV2, streamTypeMessage}, + "3.2.0": {streamTypeMsgAppV2, streamTypeMessage}, + } +) + +type streamType string + +func (t streamType) endpoint() string { + switch t { + case streamTypeMsgAppV2: + return path.Join(RaftStreamPrefix, "msgapp") + case streamTypeMessage: + return path.Join(RaftStreamPrefix, "message") + default: + plog.Panicf("unhandled stream type %v", t) + return "" + } +} + +func (t streamType) String() string { + switch t { + case streamTypeMsgAppV2: + return "stream MsgApp v2" + case streamTypeMessage: + return "stream Message" + default: + return "unknown stream" + } +} + +var ( + // linkHeartbeatMessage is a special message used as heartbeat message in + // link layer. It never conflicts with messages from raft because raft + // doesn't send out messages without From and To fields. 
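+ // isLinkHeartbeatMessage below depends on this property: a MsgHeartbeat with zero From and To fields can only be a link-layer heartbeat.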
+ linkHeartbeatMessage = raftpb.Message{Type: raftpb.MsgHeartbeat} +) + +func isLinkHeartbeatMessage(m *raftpb.Message) bool { + return m.Type == raftpb.MsgHeartbeat && m.From == 0 && m.To == 0 +} + +type outgoingConn struct { + t streamType + io.Writer + http.Flusher + io.Closer +} + +// streamWriter writes messages to the attached outgoingConn. +type streamWriter struct { + peerID types.ID + status *peerStatus + fs *stats.FollowerStats + r Raft + + mu sync.Mutex // guard field working and closer + closer io.Closer + working bool + + msgc chan raftpb.Message + connc chan *outgoingConn + stopc chan struct{} + done chan struct{} +} + +// startStreamWriter creates a streamWrite and starts a long running go-routine that accepts +// messages and writes to the attached outgoing connection. +func startStreamWriter(id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter { + w := &streamWriter{ + peerID: id, + status: status, + fs: fs, + r: r, + msgc: make(chan raftpb.Message, streamBufSize), + connc: make(chan *outgoingConn), + stopc: make(chan struct{}), + done: make(chan struct{}), + } + go w.run() + return w +} + +func (cw *streamWriter) run() { + var ( + msgc chan raftpb.Message + heartbeatc <-chan time.Time + t streamType + enc encoder + flusher http.Flusher + batched int + ) + tickc := time.NewTicker(ConnReadTimeout / 3) + defer tickc.Stop() + unflushed := 0 + + plog.Infof("started streaming with peer %s (writer)", cw.peerID) + + for { + select { + case <-heartbeatc: + err := enc.encode(&linkHeartbeatMessage) + unflushed += linkHeartbeatMessage.Size() + if err == nil { + flusher.Flush() + batched = 0 + sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed)) + unflushed = 0 + continue + } + + cw.status.deactivate(failureType{source: t.String(), action: "heartbeat"}, err.Error()) + + sentFailures.WithLabelValues(cw.peerID.String()).Inc() + cw.close() + plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + heartbeatc, msgc = nil, nil + + case m := <-msgc: + err := enc.encode(&m) + if err == nil { + unflushed += m.Size() + + if len(msgc) == 0 || batched > streamBufSize/2 { + flusher.Flush() + sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed)) + unflushed = 0 + batched = 0 + } else { + batched++ + } + + continue + } + + cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error()) + cw.close() + plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + heartbeatc, msgc = nil, nil + cw.r.ReportUnreachable(m.To) + sentFailures.WithLabelValues(cw.peerID.String()).Inc() + + case conn := <-cw.connc: + cw.mu.Lock() + closed := cw.closeUnlocked() + t = conn.t + switch conn.t { + case streamTypeMsgAppV2: + enc = newMsgAppV2Encoder(conn.Writer, cw.fs) + case streamTypeMessage: + enc = &messageEncoder{w: conn.Writer} + default: + plog.Panicf("unhandled stream type %s", conn.t) + } + flusher = conn.Flusher + unflushed = 0 + cw.status.activate() + cw.closer = conn.Closer + cw.working = true + cw.mu.Unlock() + + if closed { + plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + } + plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + heartbeatc, msgc = tickc.C, cw.msgc + case <-cw.stopc: + if cw.close() { + plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + } + plog.Infof("stopped streaming with peer %s (writer)", cw.peerID) + 
close(cw.done) + return + } + } +} + +func (cw *streamWriter) writec() (chan<- raftpb.Message, bool) { + cw.mu.Lock() + defer cw.mu.Unlock() + return cw.msgc, cw.working +} + +func (cw *streamWriter) close() bool { + cw.mu.Lock() + defer cw.mu.Unlock() + return cw.closeUnlocked() +} + +func (cw *streamWriter) closeUnlocked() bool { + if !cw.working { + return false + } + cw.closer.Close() + if len(cw.msgc) > 0 { + cw.r.ReportUnreachable(uint64(cw.peerID)) + } + cw.msgc = make(chan raftpb.Message, streamBufSize) + cw.working = false + return true +} + +func (cw *streamWriter) attach(conn *outgoingConn) bool { + select { + case cw.connc <- conn: + return true + case <-cw.done: + return false + } +} + +func (cw *streamWriter) stop() { + close(cw.stopc) + <-cw.done +} + +// streamReader is a long-running go-routine that dials to the remote stream +// endpoint and reads messages from the response body returned. +type streamReader struct { + peerID types.ID + typ streamType + + tr *Transport + picker *urlPicker + status *peerStatus + recvc chan<- raftpb.Message + propc chan<- raftpb.Message + + errorc chan<- error + + mu sync.Mutex + paused bool + cancel func() + closer io.Closer + + stopc chan struct{} + done chan struct{} +} + +func (r *streamReader) start() { + r.stopc = make(chan struct{}) + r.done = make(chan struct{}) + if r.errorc == nil { + r.errorc = r.tr.ErrorC + } + + go r.run() +} + +func (cr *streamReader) run() { + t := cr.typ + plog.Infof("started streaming with peer %s (%s reader)", cr.peerID, t) + for { + rc, err := cr.dial(t) + if err != nil { + if err != errUnsupportedStreamType { + cr.status.deactivate(failureType{source: t.String(), action: "dial"}, err.Error()) + } + } else { + cr.status.activate() + plog.Infof("established a TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ) + err := cr.decodeLoop(rc, t) + plog.Warningf("lost the TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ) + switch { + // all data is read out + case err == io.EOF: + // connection is closed by the remote + case transport.IsClosedConnError(err): + default: + cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error()) + } + } + select { + // Wait 100ms to create a new stream, so it doesn't bring too much + // overhead when retry. + case <-time.After(100 * time.Millisecond): + case <-cr.stopc: + plog.Infof("stopped streaming with peer %s (%s reader)", cr.peerID, t) + close(cr.done) + return + } + } +} + +func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { + var dec decoder + cr.mu.Lock() + switch t { + case streamTypeMsgAppV2: + dec = newMsgAppV2Decoder(rc, cr.tr.ID, cr.peerID) + case streamTypeMessage: + dec = &messageDecoder{r: rc} + default: + plog.Panicf("unhandled stream type %s", t) + } + select { + case <-cr.stopc: + cr.mu.Unlock() + if err := rc.Close(); err != nil { + return err + } + return io.EOF + default: + cr.closer = rc + } + cr.mu.Unlock() + + for { + m, err := dec.decode() + if err != nil { + cr.mu.Lock() + cr.close() + cr.mu.Unlock() + return err + } + + receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size())) + + cr.mu.Lock() + paused := cr.paused + cr.mu.Unlock() + + if paused { + continue + } + + if isLinkHeartbeatMessage(&m) { + // raft is not interested in link layer + // heartbeat message, so we should ignore + // it. 
+ continue + } + + recvc := cr.recvc + if m.Type == raftpb.MsgProp { + recvc = cr.propc + } + + select { + case recvc <- m: + default: + if cr.status.isActive() { + plog.MergeWarningf("dropped internal raft message from %s since receiving buffer is full (overloaded network)", types.ID(m.From)) + } + plog.Debugf("dropped %s from %s since receiving buffer is full", m.Type, types.ID(m.From)) + recvFailures.WithLabelValues(types.ID(m.From).String()).Inc() + } + } +} + +func (cr *streamReader) stop() { + close(cr.stopc) + cr.mu.Lock() + if cr.cancel != nil { + cr.cancel() + } + cr.close() + cr.mu.Unlock() + <-cr.done +} + +func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { + u := cr.picker.pick() + uu := u + uu.Path = path.Join(t.endpoint(), cr.tr.ID.String()) + + req, err := http.NewRequest("GET", uu.String(), nil) + if err != nil { + cr.picker.unreachable(u) + return nil, fmt.Errorf("failed to make http request to %v (%v)", u, err) + } + req.Header.Set("X-Server-From", cr.tr.ID.String()) + req.Header.Set("X-Server-Version", version.Version) + req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion) + req.Header.Set("X-Etcd-Cluster-ID", cr.tr.ClusterID.String()) + req.Header.Set("X-Raft-To", cr.peerID.String()) + + setPeerURLsHeader(req, cr.tr.URLs) + + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) + + cr.mu.Lock() + cr.cancel = cancel + select { + case <-cr.stopc: + cr.mu.Unlock() + return nil, fmt.Errorf("stream reader is stopped") + default: + } + cr.mu.Unlock() + + resp, err := cr.tr.streamRt.RoundTrip(req) + if err != nil { + cr.picker.unreachable(u) + return nil, err + } + + rv := serverVersion(resp.Header) + lv := semver.Must(semver.NewVersion(version.Version)) + if compareMajorMinorVersion(rv, lv) == -1 && !checkStreamSupport(rv, t) { + httputil.GracefulClose(resp) + cr.picker.unreachable(u) + return nil, errUnsupportedStreamType + } + + switch resp.StatusCode { + case http.StatusGone: + httputil.GracefulClose(resp) + cr.picker.unreachable(u) + reportCriticalError(errMemberRemoved, cr.errorc) + return nil, errMemberRemoved + case http.StatusOK: + return resp.Body, nil + case http.StatusNotFound: + httputil.GracefulClose(resp) + cr.picker.unreachable(u) + return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID) + case http.StatusPreconditionFailed: + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + cr.picker.unreachable(u) + return nil, err + } + httputil.GracefulClose(resp) + cr.picker.unreachable(u) + + switch strings.TrimSuffix(string(b), "\n") { + case errIncompatibleVersion.Error(): + plog.Errorf("request sent was ignored by peer %s (server version incompatible)", cr.peerID) + return nil, errIncompatibleVersion + case errClusterIDMismatch.Error(): + plog.Errorf("request sent was ignored (cluster ID mismatch: peer[%s]=%s, local=%s)", + cr.peerID, resp.Header.Get("X-Etcd-Cluster-ID"), cr.tr.ClusterID) + return nil, errClusterIDMismatch + default: + return nil, fmt.Errorf("unhandled error %q when precondition failed", string(b)) + } + default: + httputil.GracefulClose(resp) + cr.picker.unreachable(u) + return nil, fmt.Errorf("unhandled http status %d", resp.StatusCode) + } +} + +func (cr *streamReader) close() { + if cr.closer != nil { + cr.closer.Close() + } + cr.closer = nil +} + +func (cr *streamReader) pause() { + cr.mu.Lock() + defer cr.mu.Unlock() + cr.paused = true +} + +func (cr *streamReader) resume() { + cr.mu.Lock() + defer cr.mu.Unlock() + cr.paused = false +} + +// 
checkStreamSupport checks whether the stream type is supported in the +// given version. +func checkStreamSupport(v *semver.Version, t streamType) bool { + nv := &semver.Version{Major: v.Major, Minor: v.Minor} + for _, s := range supportedStream[nv.String()] { + if s == t { + return true + } + } + return false +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/transport.go b/vendor/github.com/coreos/etcd/rafthttp/transport.go new file mode 100644 index 000000000000..1f0b46836e66 --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/transport.go @@ -0,0 +1,402 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "net/http" + "sync" + "time" + + "github.com/coreos/etcd/etcdserver/stats" + "github.com/coreos/etcd/pkg/logutil" + "github.com/coreos/etcd/pkg/transport" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" + "github.com/coreos/pkg/capnslog" + "github.com/xiang90/probing" + "golang.org/x/net/context" +) + +var plog = logutil.NewMergeLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "rafthttp")) + +type Raft interface { + Process(ctx context.Context, m raftpb.Message) error + IsIDRemoved(id uint64) bool + ReportUnreachable(id uint64) + ReportSnapshot(id uint64, status raft.SnapshotStatus) +} + +type Transporter interface { + // Start starts the given Transporter. + // Start MUST be called before calling other functions in the interface. + Start() error + // Handler returns the HTTP handler of the transporter. + // A transporter HTTP handler handles the HTTP requests + // from remote peers. + // The handler MUST be used to handle RaftPrefix(/raft) + // endpoint. + Handler() http.Handler + // Send sends out the given messages to the remote peers. + // Each message has a To field, which is an id that maps + // to an existing peer in the transport. + // If the id cannot be found in the transport, the message + // will be ignored. + Send(m []raftpb.Message) + // SendSnapshot sends out the given snapshot message to a remote peer. + // The behavior of SendSnapshot is similar to Send. + SendSnapshot(m snap.Message) + // AddRemote adds a remote with given peer urls into the transport. + // A remote helps newly joined member to catch up the progress of cluster, + // and will not be used after that. + // It is the caller's responsibility to ensure the urls are all valid, + // or it panics. + AddRemote(id types.ID, urls []string) + // AddPeer adds a peer with given peer urls into the transport. + // It is the caller's responsibility to ensure the urls are all valid, + // or it panics. + // Peer urls are used to connect to the remote peer. + AddPeer(id types.ID, urls []string) + // RemovePeer removes the peer with given id. 
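+ // The Transport implementation panics if the id is not a known peer.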
+ RemovePeer(id types.ID) + // RemoveAllPeers removes all the existing peers in the transport. + RemoveAllPeers() + // UpdatePeer updates the peer urls of the peer with the given id. + // It is the caller's responsibility to ensure the urls are all valid, + // or it panics. + UpdatePeer(id types.ID, urls []string) + // ActiveSince returns the time that the connection with the peer + // of the given id becomes active. + // If the connection is active since peer was added, it returns the adding time. + // If the connection is currently inactive, it returns zero time. + ActiveSince(id types.ID) time.Time + // Stop closes the connections and stops the transporter. + Stop() +} + +// Transport implements Transporter interface. It provides the functionality +// to send raft messages to peers, and receive raft messages from peers. +// User should call Handler method to get a handler to serve requests +// received from peerURLs. +// User needs to call Start before calling other functions, and call +// Stop when the Transport is no longer used. +type Transport struct { + DialTimeout time.Duration // maximum duration before timing out dial of the request + TLSInfo transport.TLSInfo // TLS information used when creating connection + + ID types.ID // local member ID + URLs types.URLs // local peer URLs + ClusterID types.ID // raft cluster ID for request validation + Raft Raft // raft state machine, to which the Transport forwards received messages and reports status + Snapshotter *snap.Snapshotter + ServerStats *stats.ServerStats // used to record general transportation statistics + // used to record transportation statistics with followers when + // performing as leader in raft protocol + LeaderStats *stats.LeaderStats + // ErrorC is used to report detected critical errors, e.g., + // the member has been permanently removed from the cluster + // When an error is received from ErrorC, user should stop raft state + // machine and thus stop the Transport. 
+ ErrorC chan error + + streamRt http.RoundTripper // roundTripper used by streams + pipelineRt http.RoundTripper // roundTripper used by pipelines + + mu sync.RWMutex // protect the remote and peer map + remotes map[types.ID]*remote // remotes map that helps newly joined member to catch up + peers map[types.ID]Peer // peers map + + prober probing.Prober +} + +func (t *Transport) Start() error { + var err error + t.streamRt, err = newStreamRoundTripper(t.TLSInfo, t.DialTimeout) + if err != nil { + return err + } + t.pipelineRt, err = NewRoundTripper(t.TLSInfo, t.DialTimeout) + if err != nil { + return err + } + t.remotes = make(map[types.ID]*remote) + t.peers = make(map[types.ID]Peer) + t.prober = probing.NewProber(t.pipelineRt) + return nil +} + +func (t *Transport) Handler() http.Handler { + pipelineHandler := newPipelineHandler(t, t.Raft, t.ClusterID) + streamHandler := newStreamHandler(t, t, t.Raft, t.ID, t.ClusterID) + snapHandler := newSnapshotHandler(t, t.Raft, t.Snapshotter, t.ClusterID) + mux := http.NewServeMux() + mux.Handle(RaftPrefix, pipelineHandler) + mux.Handle(RaftStreamPrefix+"/", streamHandler) + mux.Handle(RaftSnapshotPrefix, snapHandler) + mux.Handle(ProbingPrefix, probing.NewHandler()) + return mux +} + +func (t *Transport) Get(id types.ID) Peer { + t.mu.RLock() + defer t.mu.RUnlock() + return t.peers[id] +} + +func (t *Transport) Send(msgs []raftpb.Message) { + for _, m := range msgs { + if m.To == 0 { + // ignore intentionally dropped message + continue + } + to := types.ID(m.To) + + t.mu.RLock() + p, pok := t.peers[to] + g, rok := t.remotes[to] + t.mu.RUnlock() + + if pok { + if m.Type == raftpb.MsgApp { + t.ServerStats.SendAppendReq(m.Size()) + } + p.send(m) + continue + } + + if rok { + g.send(m) + continue + } + + plog.Debugf("ignored message %s (sent to unknown peer %s)", m.Type, to) + } +} + +func (t *Transport) Stop() { + t.mu.Lock() + defer t.mu.Unlock() + for _, r := range t.remotes { + r.stop() + } + for _, p := range t.peers { + p.stop() + } + t.prober.RemoveAll() + if tr, ok := t.streamRt.(*http.Transport); ok { + tr.CloseIdleConnections() + } + if tr, ok := t.pipelineRt.(*http.Transport); ok { + tr.CloseIdleConnections() + } + t.peers = nil + t.remotes = nil +} + +// CutPeer drops messages to the specified peer. +func (t *Transport) CutPeer(id types.ID) { + t.mu.RLock() + p, pok := t.peers[id] + g, gok := t.remotes[id] + t.mu.RUnlock() + + if pok { + p.(Pausable).Pause() + } + if gok { + g.Pause() + } +} + +// MendPeer recovers the message dropping behavior of the given peer. +func (t *Transport) MendPeer(id types.ID) { + t.mu.RLock() + p, pok := t.peers[id] + g, gok := t.remotes[id] + t.mu.RUnlock() + + if pok { + p.(Pausable).Resume() + } + if gok { + g.Resume() + } +} + +func (t *Transport) AddRemote(id types.ID, us []string) { + t.mu.Lock() + defer t.mu.Unlock() + if t.remotes == nil { + // there's no clean way to shutdown the golang http server + // (see: https://github.com/golang/go/issues/4674) before + // stopping the transport; ignore any new connections. 
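+		// (Stop sets t.remotes to nil, which is what this nil check
+		// detects.)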
+ return + } + if _, ok := t.peers[id]; ok { + return + } + if _, ok := t.remotes[id]; ok { + return + } + urls, err := types.NewURLs(us) + if err != nil { + plog.Panicf("newURLs %+v should never fail: %+v", us, err) + } + t.remotes[id] = startRemote(t, urls, id) +} + +func (t *Transport) AddPeer(id types.ID, us []string) { + t.mu.Lock() + defer t.mu.Unlock() + + if t.peers == nil { + panic("transport stopped") + } + if _, ok := t.peers[id]; ok { + return + } + urls, err := types.NewURLs(us) + if err != nil { + plog.Panicf("newURLs %+v should never fail: %+v", us, err) + } + fs := t.LeaderStats.Follower(id.String()) + t.peers[id] = startPeer(t, urls, id, fs) + addPeerToProber(t.prober, id.String(), us) + + plog.Infof("added peer %s", id) +} + +func (t *Transport) RemovePeer(id types.ID) { + t.mu.Lock() + defer t.mu.Unlock() + t.removePeer(id) +} + +func (t *Transport) RemoveAllPeers() { + t.mu.Lock() + defer t.mu.Unlock() + for id := range t.peers { + t.removePeer(id) + } +} + +// the caller of this function must have the peers mutex. +func (t *Transport) removePeer(id types.ID) { + if peer, ok := t.peers[id]; ok { + peer.stop() + } else { + plog.Panicf("unexpected removal of unknown peer '%d'", id) + } + delete(t.peers, id) + delete(t.LeaderStats.Followers, id.String()) + t.prober.Remove(id.String()) + plog.Infof("removed peer %s", id) +} + +func (t *Transport) UpdatePeer(id types.ID, us []string) { + t.mu.Lock() + defer t.mu.Unlock() + // TODO: return error or just panic? + if _, ok := t.peers[id]; !ok { + return + } + urls, err := types.NewURLs(us) + if err != nil { + plog.Panicf("newURLs %+v should never fail: %+v", us, err) + } + t.peers[id].update(urls) + + t.prober.Remove(id.String()) + addPeerToProber(t.prober, id.String(), us) + plog.Infof("updated peer %s", id) +} + +func (t *Transport) ActiveSince(id types.ID) time.Time { + t.mu.Lock() + defer t.mu.Unlock() + if p, ok := t.peers[id]; ok { + return p.activeSince() + } + return time.Time{} +} + +func (t *Transport) SendSnapshot(m snap.Message) { + t.mu.Lock() + defer t.mu.Unlock() + p := t.peers[types.ID(m.To)] + if p == nil { + m.CloseWithError(errMemberNotFound) + return + } + p.sendSnap(m) +} + +// Pausable is a testing interface for pausing transport traffic. 
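+// CutPeer and MendPeer rely on peers satisfying it via the type
+// assertion p.(Pausable).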
+type Pausable interface { + Pause() + Resume() +} + +func (t *Transport) Pause() { + for _, p := range t.peers { + p.(Pausable).Pause() + } +} + +func (t *Transport) Resume() { + for _, p := range t.peers { + p.(Pausable).Resume() + } +} + +type nopTransporter struct{} + +func NewNopTransporter() Transporter { + return &nopTransporter{} +} + +func (s *nopTransporter) Start() error { return nil } +func (s *nopTransporter) Handler() http.Handler { return nil } +func (s *nopTransporter) Send(m []raftpb.Message) {} +func (s *nopTransporter) SendSnapshot(m snap.Message) {} +func (s *nopTransporter) AddRemote(id types.ID, us []string) {} +func (s *nopTransporter) AddPeer(id types.ID, us []string) {} +func (s *nopTransporter) RemovePeer(id types.ID) {} +func (s *nopTransporter) RemoveAllPeers() {} +func (s *nopTransporter) UpdatePeer(id types.ID, us []string) {} +func (s *nopTransporter) ActiveSince(id types.ID) time.Time { return time.Time{} } +func (s *nopTransporter) Stop() {} +func (s *nopTransporter) Pause() {} +func (s *nopTransporter) Resume() {} + +type snapTransporter struct { + nopTransporter + snapDoneC chan snap.Message + snapDir string +} + +func NewSnapTransporter(snapDir string) (Transporter, <-chan snap.Message) { + ch := make(chan snap.Message, 1) + tr := &snapTransporter{snapDoneC: ch, snapDir: snapDir} + return tr, ch +} + +func (s *snapTransporter) SendSnapshot(m snap.Message) { + ss := snap.New(s.snapDir) + ss.SaveDBFrom(m.ReadCloser, m.Snapshot.Metadata.Index+1) + m.CloseWithError(nil) + s.snapDoneC <- m +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/urlpick.go b/vendor/github.com/coreos/etcd/rafthttp/urlpick.go new file mode 100644 index 000000000000..61839deeb70e --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/urlpick.go @@ -0,0 +1,57 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "net/url" + "sync" + + "github.com/coreos/etcd/pkg/types" +) + +type urlPicker struct { + mu sync.Mutex // guards urls and picked + urls types.URLs + picked int +} + +func newURLPicker(urls types.URLs) *urlPicker { + return &urlPicker{ + urls: urls, + } +} + +func (p *urlPicker) update(urls types.URLs) { + p.mu.Lock() + defer p.mu.Unlock() + p.urls = urls + p.picked = 0 +} + +func (p *urlPicker) pick() url.URL { + p.mu.Lock() + defer p.mu.Unlock() + return p.urls[p.picked] +} + +// unreachable notices the picker that the given url is unreachable, +// and it should use other possible urls. 
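+// For example, with urls [u0, u1, u2] and picked == 0, unreachable(u0)
+// advances picked to 1, while reports about urls other than the picked
+// one are ignored.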
+func (p *urlPicker) unreachable(u url.URL) { + p.mu.Lock() + defer p.mu.Unlock() + if u == p.urls[p.picked] { + p.picked = (p.picked + 1) % len(p.urls) + } +} diff --git a/vendor/github.com/coreos/etcd/rafthttp/util.go b/vendor/github.com/coreos/etcd/rafthttp/util.go new file mode 100644 index 000000000000..12e548c77175 --- /dev/null +++ b/vendor/github.com/coreos/etcd/rafthttp/util.go @@ -0,0 +1,177 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "fmt" + "io" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/coreos/etcd/pkg/transport" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/version" + "github.com/coreos/go-semver/semver" +) + +var ( + errMemberRemoved = fmt.Errorf("the member has been permanently removed from the cluster") + errMemberNotFound = fmt.Errorf("member not found") +) + +// NewListener returns a listener for raft message transfer between peers. +// It uses timeout listener to identify broken streams promptly. +func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) { + return transport.NewTimeoutListener(u.Host, u.Scheme, tlsinfo, ConnReadTimeout, ConnWriteTimeout) +} + +// NewRoundTripper returns a roundTripper used to send requests +// to rafthttp listener of remote peers. +func NewRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) { + // It uses timeout transport to pair with remote timeout listeners. + // It sets no read/write timeout, because message in requests may + // take long time to write out before reading out the response. + return transport.NewTimeoutTransport(tlsInfo, dialTimeout, 0, 0) +} + +// newStreamRoundTripper returns a roundTripper used to send stream requests +// to rafthttp listener of remote peers. +// Read/write timeout is set for stream roundTripper to promptly +// find out broken status, which minimizes the number of messages +// sent on broken connection. +func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) { + return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout) +} + +// createPostRequest creates a HTTP POST request that sends raft message. 
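+// It always returns a non-nil request; a malformed URL causes a panic
+// rather than an error return.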
+func createPostRequest(u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request { + uu := u + uu.Path = path + req, err := http.NewRequest("POST", uu.String(), body) + if err != nil { + plog.Panicf("unexpected new request error (%v)", err) + } + req.Header.Set("Content-Type", ct) + req.Header.Set("X-Server-From", from.String()) + req.Header.Set("X-Server-Version", version.Version) + req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion) + req.Header.Set("X-Etcd-Cluster-ID", cid.String()) + setPeerURLsHeader(req, urls) + + return req +} + +// checkPostResponse checks the response of the HTTP POST request that sends +// raft message. +func checkPostResponse(resp *http.Response, body []byte, req *http.Request, to types.ID) error { + switch resp.StatusCode { + case http.StatusPreconditionFailed: + switch strings.TrimSuffix(string(body), "\n") { + case errIncompatibleVersion.Error(): + plog.Errorf("request sent was ignored by peer %s (server version incompatible)", to) + return errIncompatibleVersion + case errClusterIDMismatch.Error(): + plog.Errorf("request sent was ignored (cluster ID mismatch: remote[%s]=%s, local=%s)", + to, resp.Header.Get("X-Etcd-Cluster-ID"), req.Header.Get("X-Etcd-Cluster-ID")) + return errClusterIDMismatch + default: + return fmt.Errorf("unhandled error %q when precondition failed", string(body)) + } + case http.StatusForbidden: + return errMemberRemoved + case http.StatusNoContent: + return nil + default: + return fmt.Errorf("unexpected http status %s while posting to %q", http.StatusText(resp.StatusCode), req.URL.String()) + } +} + +// reportCriticalError reports the given error through sending it into +// the given error channel. +// If the error channel is filled up when sending error, it drops the error +// because the fact that error has happened is reported, which is +// good enough. +func reportCriticalError(err error, errc chan<- error) { + select { + case errc <- err: + default: + } +} + +// compareMajorMinorVersion returns an integer comparing two versions based on +// their major and minor version. The result will be 0 if a==b, -1 if a < b, +// and 1 if a > b. +func compareMajorMinorVersion(a, b *semver.Version) int { + na := &semver.Version{Major: a.Major, Minor: a.Minor} + nb := &semver.Version{Major: b.Major, Minor: b.Minor} + switch { + case na.LessThan(*nb): + return -1 + case nb.LessThan(*na): + return 1 + default: + return 0 + } +} + +// serverVersion returns the server version from the given header. +func serverVersion(h http.Header) *semver.Version { + verStr := h.Get("X-Server-Version") + // backward compatibility with etcd 2.0 + if verStr == "" { + verStr = "2.0.0" + } + return semver.Must(semver.NewVersion(verStr)) +} + +// serverVersion returns the min cluster version from the given header. +func minClusterVersion(h http.Header) *semver.Version { + verStr := h.Get("X-Min-Cluster-Version") + // backward compatibility with etcd 2.0 + if verStr == "" { + verStr = "2.0.0" + } + return semver.Must(semver.NewVersion(verStr)) +} + +// checkVersionCompability checks whether the given version is compatible +// with the local version. 
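+// Only major and minor versions are compared, so e.g. 3.2.15 and 3.2.0
+// are treated as equal.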
+func checkVersionCompability(name string, server, minCluster *semver.Version) error { + localServer := semver.Must(semver.NewVersion(version.Version)) + localMinCluster := semver.Must(semver.NewVersion(version.MinClusterVersion)) + if compareMajorMinorVersion(server, localMinCluster) == -1 { + return fmt.Errorf("remote version is too low: remote[%s]=%s, local=%s", name, server, localServer) + } + if compareMajorMinorVersion(minCluster, localServer) == 1 { + return fmt.Errorf("local version is too low: remote[%s]=%s, local=%s", name, server, localServer) + } + return nil +} + +// setPeerURLsHeader reports local urls for peer discovery +func setPeerURLsHeader(req *http.Request, urls types.URLs) { + if urls == nil { + // often not set in unit tests + return + } + peerURLs := make([]string, urls.Len()) + for i := range urls { + peerURLs[i] = urls[i].String() + } + req.Header.Set("X-PeerURLs", strings.Join(peerURLs, ",")) +} diff --git a/vendor/github.com/coreos/etcd/snap/BUILD b/vendor/github.com/coreos/etcd/snap/BUILD new file mode 100644 index 000000000000..f483b26fa669 --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/BUILD @@ -0,0 +1,40 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "db.go", + "message.go", + "metrics.go", + "snapshotter.go", + ], + importpath = "github.com/coreos/etcd/snap", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/ioutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library", + "//vendor/github.com/coreos/etcd/raft:go_default_library", + "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", + "//vendor/github.com/coreos/etcd/snap/snappb:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/snap/snappb:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/snap/db.go b/vendor/github.com/coreos/etcd/snap/db.go new file mode 100644 index 000000000000..01d897ae8611 --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/db.go @@ -0,0 +1,77 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package snap + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/coreos/etcd/pkg/fileutil" +) + +var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist") + +// SaveDBFrom saves snapshot of the database from the given reader. It +// guarantees the save operation is atomic. +func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { + f, err := ioutil.TempFile(s.dir, "tmp") + if err != nil { + return 0, err + } + var n int64 + n, err = io.Copy(f, r) + if err == nil { + err = fileutil.Fsync(f) + } + f.Close() + if err != nil { + os.Remove(f.Name()) + return n, err + } + fn := s.dbFilePath(id) + if fileutil.Exist(fn) { + os.Remove(f.Name()) + return n, nil + } + err = os.Rename(f.Name(), fn) + if err != nil { + os.Remove(f.Name()) + return n, err + } + + plog.Infof("saved database snapshot to disk [total bytes: %d]", n) + + return n, nil +} + +// DBFilePath returns the file path for the snapshot of the database with +// given id. If the snapshot does not exist, it returns error. +func (s *Snapshotter) DBFilePath(id uint64) (string, error) { + if _, err := fileutil.ReadDir(s.dir); err != nil { + return "", err + } + if fn := s.dbFilePath(id); fileutil.Exist(fn) { + return fn, nil + } + return "", ErrNoDBSnapshot +} + +func (s *Snapshotter) dbFilePath(id uint64) string { + return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) +} diff --git a/vendor/github.com/coreos/etcd/snap/message.go b/vendor/github.com/coreos/etcd/snap/message.go new file mode 100644 index 000000000000..d73713ff169b --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/message.go @@ -0,0 +1,64 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package snap + +import ( + "io" + + "github.com/coreos/etcd/pkg/ioutil" + "github.com/coreos/etcd/raft/raftpb" +) + +// Message is a struct that contains a raft Message and a ReadCloser. The type +// of raft message MUST be MsgSnap, which contains the raft meta-data and an +// additional data []byte field that contains the snapshot of the actual state +// machine. +// Message contains the ReadCloser field for handling large snapshot. This avoid +// copying the entire snapshot into a byte array, which consumes a lot of memory. +// +// User of Message should close the Message after sending it. +type Message struct { + raftpb.Message + ReadCloser io.ReadCloser + TotalSize int64 + closeC chan bool +} + +func NewMessage(rs raftpb.Message, rc io.ReadCloser, rcSize int64) *Message { + return &Message{ + Message: rs, + ReadCloser: ioutil.NewExactReadCloser(rc, rcSize), + TotalSize: int64(rs.Size()) + rcSize, + closeC: make(chan bool, 1), + } +} + +// CloseNotify returns a channel that receives a single value +// when the message sent is finished. true indicates the sent +// is successful. 
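+//
+// A hypothetical usage sketch (illustrative only; raftMsg, rc and size
+// are placeholder names):
+//
+//	m := snap.NewMessage(raftMsg, rc, size)
+//	transport.SendSnapshot(*m)
+//	ok := <-m.CloseNotify() // blocks until CloseWithError is called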
+func (m Message) CloseNotify() <-chan bool { + return m.closeC +} + +func (m Message) CloseWithError(err error) { + if cerr := m.ReadCloser.Close(); cerr != nil { + err = cerr + } + if err == nil { + m.closeC <- true + } else { + m.closeC <- false + } +} diff --git a/vendor/github.com/coreos/etcd/snap/metrics.go b/vendor/github.com/coreos/etcd/snap/metrics.go new file mode 100644 index 000000000000..433ef09d4ba8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/metrics.go @@ -0,0 +1,41 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package snap + +import "github.com/prometheus/client_golang/prometheus" + +var ( + // TODO: save_fsync latency? + saveDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "snap", + Name: "save_total_duration_seconds", + Help: "The total latency distributions of save called by snapshot.", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), + }) + + marshallingDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "snap", + Name: "save_marshalling_duration_seconds", + Help: "The marshalling cost distributions of save called by snapshot.", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), + }) +) + +func init() { + prometheus.MustRegister(saveDurations) + prometheus.MustRegister(marshallingDurations) +} diff --git a/vendor/github.com/coreos/etcd/snap/snappb/BUILD b/vendor/github.com/coreos/etcd/snap/snappb/BUILD new file mode 100644 index 000000000000..ea06259b4e69 --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/snappb/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["snap.pb.go"], + importpath = "github.com/coreos/etcd/snap/snappb", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go new file mode 100644 index 000000000000..05a77ff9d06c --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go @@ -0,0 +1,353 @@ +// Code generated by protoc-gen-gogo. +// source: snap.proto +// DO NOT EDIT! + +/* + Package snappb is a generated protocol buffer package. + + It is generated from these files: + snap.proto + + It has these top-level messages: + Snapshot +*/ +package snappb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Snapshot struct { + Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"` + Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnap, []int{0} } + +func init() { + proto.RegisterType((*Snapshot)(nil), "snappb.snapshot") +} +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintSnap(dAtA, i, uint64(m.Crc)) + if m.Data != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSnap(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Snap(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Snap(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintSnap(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Snapshot) Size() (n int) { + var l int + _ = l + n += 1 + sovSnap(uint64(m.Crc)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovSnap(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSnap(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSnap(x uint64) (n int) { + return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) + } + m.Crc = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowSnap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Crc |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSnap + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnap(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnap + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSnap(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSnap + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSnap(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) } + +var fileDescriptorSnap = []byte{ + // 126 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, + 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, + 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 
0x03, 0x24, 0x5f, 0x9c, + 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb, + 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24, + 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1, + 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e, + 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.proto b/vendor/github.com/coreos/etcd/snap/snappb/snap.proto new file mode 100644 index 000000000000..cd3d21d0ee12 --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/snappb/snap.proto @@ -0,0 +1,14 @@ +syntax = "proto2"; +package snappb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message snapshot { + optional uint32 crc = 1 [(gogoproto.nullable) = false]; + optional bytes data = 2; +} diff --git a/vendor/github.com/coreos/etcd/snap/snapshotter.go b/vendor/github.com/coreos/etcd/snap/snapshotter.go new file mode 100644 index 000000000000..007555921295 --- /dev/null +++ b/vendor/github.com/coreos/etcd/snap/snapshotter.go @@ -0,0 +1,204 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package snap stores raft nodes' states with snapshots. +package snap + +import ( + "errors" + "fmt" + "hash/crc32" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "time" + + pioutil "github.com/coreos/etcd/pkg/ioutil" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap/snappb" + + "github.com/coreos/pkg/capnslog" +) + +const ( + snapSuffix = ".snap" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "snap") + + ErrNoSnapshot = errors.New("snap: no available snapshot") + ErrEmptySnapshot = errors.New("snap: empty snapshot") + ErrCRCMismatch = errors.New("snap: crc mismatch") + crcTable = crc32.MakeTable(crc32.Castagnoli) + + // A map of valid files that can be present in the snap folder. 
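+	// Today the only such file is "db".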
+ validFiles = map[string]bool{ + "db": true, + } +) + +type Snapshotter struct { + dir string +} + +func New(dir string) *Snapshotter { + return &Snapshotter{ + dir: dir, + } +} + +func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error { + if raft.IsEmptySnap(snapshot) { + return nil + } + return s.save(&snapshot) +} + +func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error { + start := time.Now() + + fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix) + b := pbutil.MustMarshal(snapshot) + crc := crc32.Update(0, crcTable, b) + snap := snappb.Snapshot{Crc: crc, Data: b} + d, err := snap.Marshal() + if err != nil { + return err + } else { + marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second)) + } + + err = pioutil.WriteAndSyncFile(filepath.Join(s.dir, fname), d, 0666) + if err == nil { + saveDurations.Observe(float64(time.Since(start)) / float64(time.Second)) + } else { + err1 := os.Remove(filepath.Join(s.dir, fname)) + if err1 != nil { + plog.Errorf("failed to remove broken snapshot file %s", filepath.Join(s.dir, fname)) + } + } + return err +} + +func (s *Snapshotter) Load() (*raftpb.Snapshot, error) { + names, err := s.snapNames() + if err != nil { + return nil, err + } + var snap *raftpb.Snapshot + for _, name := range names { + if snap, err = loadSnap(s.dir, name); err == nil { + break + } + } + if err != nil { + return nil, ErrNoSnapshot + } + return snap, nil +} + +func loadSnap(dir, name string) (*raftpb.Snapshot, error) { + fpath := filepath.Join(dir, name) + snap, err := Read(fpath) + if err != nil { + renameBroken(fpath) + } + return snap, err +} + +// Read reads the snapshot named by snapname and returns the snapshot. +func Read(snapname string) (*raftpb.Snapshot, error) { + b, err := ioutil.ReadFile(snapname) + if err != nil { + plog.Errorf("cannot read file %v: %v", snapname, err) + return nil, err + } + + if len(b) == 0 { + plog.Errorf("unexpected empty snapshot") + return nil, ErrEmptySnapshot + } + + var serializedSnap snappb.Snapshot + if err = serializedSnap.Unmarshal(b); err != nil { + plog.Errorf("corrupted snapshot file %v: %v", snapname, err) + return nil, err + } + + if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 { + plog.Errorf("unexpected empty snapshot") + return nil, ErrEmptySnapshot + } + + crc := crc32.Update(0, crcTable, serializedSnap.Data) + if crc != serializedSnap.Crc { + plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname) + return nil, ErrCRCMismatch + } + + var snap raftpb.Snapshot + if err = snap.Unmarshal(serializedSnap.Data); err != nil { + plog.Errorf("corrupted snapshot file %v: %v", snapname, err) + return nil, err + } + return &snap, nil +} + +// snapNames returns the filename of the snapshots in logical time order (from newest to oldest). +// If there is no available snapshots, an ErrNoSnapshot will be returned. 
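+// The ordering relies on the zero-padded "%016x-%016x.snap" name format,
+// under which reverse lexical order equals descending (term, index) order.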
+func (s *Snapshotter) snapNames() ([]string, error) { + dir, err := os.Open(s.dir) + if err != nil { + return nil, err + } + defer dir.Close() + names, err := dir.Readdirnames(-1) + if err != nil { + return nil, err + } + snaps := checkSuffix(names) + if len(snaps) == 0 { + return nil, ErrNoSnapshot + } + sort.Sort(sort.Reverse(sort.StringSlice(snaps))) + return snaps, nil +} + +func checkSuffix(names []string) []string { + snaps := []string{} + for i := range names { + if strings.HasSuffix(names[i], snapSuffix) { + snaps = append(snaps, names[i]) + } else { + // If we find a file which is not a snapshot then check if it's + // a vaild file. If not throw out a warning. + if _, ok := validFiles[names[i]]; !ok { + plog.Warningf("skipped unexpected non snapshot file %v", names[i]) + } + } + } + return snaps +} + +func renameBroken(path string) { + brokenPath := path + ".broken" + if err := os.Rename(path, brokenPath); err != nil { + plog.Warningf("cannot rename broken snapshot file %v to %v: %v", path, brokenPath, err) + } +} diff --git a/vendor/github.com/coreos/etcd/store/BUILD b/vendor/github.com/coreos/etcd/store/BUILD new file mode 100644 index 000000000000..17eeacac30b4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/store/BUILD @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "event.go", + "event_history.go", + "event_queue.go", + "metrics.go", + "node.go", + "node_extern.go", + "stats.go", + "store.go", + "ttl_key_heap.go", + "watcher.go", + "watcher_hub.go", + ], + importpath = "github.com/coreos/etcd/store", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/error:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", + "//vendor/github.com/jonboulle/clockwork:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/store/doc.go b/vendor/github.com/coreos/etcd/store/doc.go new file mode 100644 index 000000000000..612df927976e --- /dev/null +++ b/vendor/github.com/coreos/etcd/store/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package store defines etcd's in-memory key/value store. 
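+// It provides directories, TTL-based expiration and watchable events on
+// top of a hierarchical node tree.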
+package store diff --git a/vendor/github.com/coreos/etcd/store/event.go b/vendor/github.com/coreos/etcd/store/event.go new file mode 100644 index 000000000000..efcddb0e0538 --- /dev/null +++ b/vendor/github.com/coreos/etcd/store/event.go @@ -0,0 +1,71 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package store + +const ( + Get = "get" + Create = "create" + Set = "set" + Update = "update" + Delete = "delete" + CompareAndSwap = "compareAndSwap" + CompareAndDelete = "compareAndDelete" + Expire = "expire" +) + +type Event struct { + Action string `json:"action"` + Node *NodeExtern `json:"node,omitempty"` + PrevNode *NodeExtern `json:"prevNode,omitempty"` + EtcdIndex uint64 `json:"-"` + Refresh bool `json:"refresh,omitempty"` +} + +func newEvent(action string, key string, modifiedIndex, createdIndex uint64) *Event { + n := &NodeExtern{ + Key: key, + ModifiedIndex: modifiedIndex, + CreatedIndex: createdIndex, + } + + return &Event{ + Action: action, + Node: n, + } +} + +func (e *Event) IsCreated() bool { + if e.Action == Create { + return true + } + return e.Action == Set && e.PrevNode == nil +} + +func (e *Event) Index() uint64 { + return e.Node.ModifiedIndex +} + +func (e *Event) Clone() *Event { + return &Event{ + Action: e.Action, + EtcdIndex: e.EtcdIndex, + Node: e.Node.Clone(), + PrevNode: e.PrevNode.Clone(), + } +} + +func (e *Event) SetRefresh() { + e.Refresh = true +} diff --git a/vendor/github.com/coreos/etcd/store/event_history.go b/vendor/github.com/coreos/etcd/store/event_history.go new file mode 100644 index 000000000000..235d87a26649 --- /dev/null +++ b/vendor/github.com/coreos/etcd/store/event_history.go @@ -0,0 +1,129 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package store + +import ( + "fmt" + "path" + "strings" + "sync" + + etcdErr "github.com/coreos/etcd/error" +) + +type EventHistory struct { + Queue eventQueue + StartIndex uint64 + LastIndex uint64 + rwl sync.RWMutex +} + +func newEventHistory(capacity int) *EventHistory { + return &EventHistory{ + Queue: eventQueue{ + Capacity: capacity, + Events: make([]*Event, capacity), + }, + } +} + +// addEvent function adds event into the eventHistory +func (eh *EventHistory) addEvent(e *Event) *Event { + eh.rwl.Lock() + defer eh.rwl.Unlock() + + eh.Queue.insert(e) + + eh.LastIndex = e.Index() + + eh.StartIndex = eh.Queue.Events[eh.Queue.Front].Index() + + return e +} + +// scan enumerates events from the index history and stops at the first point +// where the key matches. +func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event, *etcdErr.Error) { + eh.rwl.RLock() + defer eh.rwl.RUnlock() + + // index should be after the event history's StartIndex + if index < eh.StartIndex { + return nil, + etcdErr.NewError(etcdErr.EcodeEventIndexCleared, + fmt.Sprintf("the requested history has been cleared [%v/%v]", + eh.StartIndex, index), 0) + } + + // the index should come before the size of the queue minus the duplicate count + if index > eh.LastIndex { // future index + return nil, nil + } + + offset := index - eh.StartIndex + i := (eh.Queue.Front + int(offset)) % eh.Queue.Capacity + + for { + e := eh.Queue.Events[i] + + if !e.Refresh { + ok := (e.Node.Key == key) + + if recursive { + // add tailing slash + nkey := path.Clean(key) + if nkey[len(nkey)-1] != '/' { + nkey = nkey + "/" + } + + ok = ok || strings.HasPrefix(e.Node.Key, nkey) + } + + if (e.Action == Delete || e.Action == Expire) && e.PrevNode != nil && e.PrevNode.Dir { + ok = ok || strings.HasPrefix(key, e.PrevNode.Key) + } + + if ok { + return e, nil + } + } + + i = (i + 1) % eh.Queue.Capacity + + if i == eh.Queue.Back { + return nil, nil + } + } +} + +// clone will be protected by a stop-world lock +// do not need to obtain internal lock +func (eh *EventHistory) clone() *EventHistory { + clonedQueue := eventQueue{ + Capacity: eh.Queue.Capacity, + Events: make([]*Event, eh.Queue.Capacity), + Size: eh.Queue.Size, + Front: eh.Queue.Front, + Back: eh.Queue.Back, + } + + copy(clonedQueue.Events, eh.Queue.Events) + return &EventHistory{ + StartIndex: eh.StartIndex, + Queue: clonedQueue, + LastIndex: eh.LastIndex, + } + +} diff --git a/vendor/github.com/coreos/etcd/store/event_queue.go b/vendor/github.com/coreos/etcd/store/event_queue.go new file mode 100644 index 000000000000..767b835913e7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/store/event_queue.go @@ -0,0 +1,34 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
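+
+// eventQueue below is a fixed-capacity ring buffer: once Size reaches
+// Capacity, each insert overwrites the oldest event and advances Front.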
+ +package store + +type eventQueue struct { + Events []*Event + Size int + Front int + Back int + Capacity int +} + +func (eq *eventQueue) insert(e *Event) { + eq.Events[eq.Back] = e + eq.Back = (eq.Back + 1) % eq.Capacity + + if eq.Size == eq.Capacity { //dequeue + eq.Front = (eq.Front + 1) % eq.Capacity + } else { + eq.Size++ + } +} diff --git a/vendor/github.com/coreos/etcd/store/metrics.go b/vendor/github.com/coreos/etcd/store/metrics.go new file mode 100644 index 000000000000..26404ba72e35 --- /dev/null +++ b/vendor/github.com/coreos/etcd/store/metrics.go @@ -0,0 +1,128 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package store + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// Set of raw Prometheus metrics. +// Labels +// * action = declared in event.go +// * outcome = Outcome +// Do not increment directly, use Report* methods. +var ( + readCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "store", + Name: "reads_total", + Help: "Total number of reads action by (get/getRecursive), local to this member.", + }, []string{"action"}) + + writeCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "store", + Name: "writes_total", + Help: "Total number of writes (e.g. set/compareAndDelete) seen by this member.", + }, []string{"action"}) + + readFailedCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "store", + Name: "reads_failed_total", + Help: "Failed read actions by (get/getRecursive), local to this member.", + }, []string{"action"}) + + writeFailedCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "store", + Name: "writes_failed_total", + Help: "Failed write actions (e.g. 
set/compareAndDelete), seen by this member.", + }, []string{"action"}) + + expireCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "store", + Name: "expires_total", + Help: "Total number of expired keys.", + }) + + watchRequests = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "store", + Name: "watch_requests_total", + Help: "Total number of incoming watch requests (new or reestablished).", + }) + + watcherCount = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "store", + Name: "watchers", + Help: "Count of currently active watchers.", + }) +) + +const ( + GetRecursive = "getRecursive" +) + +func init() { + prometheus.MustRegister(readCounter) + prometheus.MustRegister(writeCounter) + prometheus.MustRegister(expireCounter) + prometheus.MustRegister(watchRequests) + prometheus.MustRegister(watcherCount) +} + +func reportReadSuccess(read_action string) { + readCounter.WithLabelValues(read_action).Inc() +} + +func reportReadFailure(read_action string) { + readCounter.WithLabelValues(read_action).Inc() + readFailedCounter.WithLabelValues(read_action).Inc() +} + +func reportWriteSuccess(write_action string) { + writeCounter.WithLabelValues(write_action).Inc() +} + +func reportWriteFailure(write_action string) { + writeCounter.WithLabelValues(write_action).Inc() + writeFailedCounter.WithLabelValues(write_action).Inc() +} + +func reportExpiredKey() { + expireCounter.Inc() +} + +func reportWatchRequest() { + watchRequests.Inc() +} + +func reportWatcherAdded() { + watcherCount.Inc() +} + +func reportWatcherRemoved() { + watcherCount.Dec() +} diff --git a/vendor/github.com/coreos/etcd/store/node.go b/vendor/github.com/coreos/etcd/store/node.go new file mode 100644 index 000000000000..54159553500f --- /dev/null +++ b/vendor/github.com/coreos/etcd/store/node.go @@ -0,0 +1,395 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package store + +import ( + "path" + "sort" + "time" + + etcdErr "github.com/coreos/etcd/error" + "github.com/jonboulle/clockwork" +) + +// explanations of Compare function result +const ( + CompareMatch = iota + CompareIndexNotMatch + CompareValueNotMatch + CompareNotMatch +) + +var Permanent time.Time + +// node is the basic element in the store system. +// A key-value pair will have a string value +// A directory will have a children map +type node struct { + Path string + + CreatedIndex uint64 + ModifiedIndex uint64 + + Parent *node `json:"-"` // should not encode this field! avoid circular dependency. + + ExpireTime time.Time + Value string // for key-value pair + Children map[string]*node // for directory + + // A reference to the store this node is attached to. 
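+	// Being unexported, it is also skipped by JSON encoding.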
+ store *store +} + +// newKV creates a Key-Value pair +func newKV(store *store, nodePath string, value string, createdIndex uint64, parent *node, expireTime time.Time) *node { + return &node{ + Path: nodePath, + CreatedIndex: createdIndex, + ModifiedIndex: createdIndex, + Parent: parent, + store: store, + ExpireTime: expireTime, + Value: value, + } +} + +// newDir creates a directory +func newDir(store *store, nodePath string, createdIndex uint64, parent *node, expireTime time.Time) *node { + return &node{ + Path: nodePath, + CreatedIndex: createdIndex, + ModifiedIndex: createdIndex, + Parent: parent, + ExpireTime: expireTime, + Children: make(map[string]*node), + store: store, + } +} + +// IsHidden function checks if the node is a hidden node. A hidden node +// will begin with '_' +// A hidden node will not be shown via get command under a directory +// For example if we have /foo/_hidden and /foo/notHidden, get "/foo" +// will only return /foo/notHidden +func (n *node) IsHidden() bool { + _, name := path.Split(n.Path) + + return name[0] == '_' +} + +// IsPermanent function checks if the node is a permanent one. +func (n *node) IsPermanent() bool { + // we use a uninitialized time.Time to indicate the node is a + // permanent one. + // the uninitialized time.Time should equal zero. + return n.ExpireTime.IsZero() +} + +// IsDir function checks whether the node is a directory. +// If the node is a directory, the function will return true. +// Otherwise the function will return false. +func (n *node) IsDir() bool { + return n.Children != nil +} + +// Read function gets the value of the node. +// If the receiver node is not a key-value pair, a "Not A File" error will be returned. +func (n *node) Read() (string, *etcdErr.Error) { + if n.IsDir() { + return "", etcdErr.NewError(etcdErr.EcodeNotFile, "", n.store.CurrentIndex) + } + + return n.Value, nil +} + +// Write function set the value of the node to the given value. +// If the receiver node is a directory, a "Not A File" error will be returned. +func (n *node) Write(value string, index uint64) *etcdErr.Error { + if n.IsDir() { + return etcdErr.NewError(etcdErr.EcodeNotFile, "", n.store.CurrentIndex) + } + + n.Value = value + n.ModifiedIndex = index + + return nil +} + +func (n *node) expirationAndTTL(clock clockwork.Clock) (*time.Time, int64) { + if !n.IsPermanent() { + /* compute ttl as: + ceiling( (expireTime - timeNow) / nanosecondsPerSecond ) + which ranges from 1..n + rather than as: + ( (expireTime - timeNow) / nanosecondsPerSecond ) + 1 + which ranges 1..n+1 + */ + ttlN := n.ExpireTime.Sub(clock.Now()) + ttl := ttlN / time.Second + if (ttlN % time.Second) > 0 { + ttl++ + } + t := n.ExpireTime.UTC() + return &t, int64(ttl) + } + return nil, 0 +} + +// List function return a slice of nodes under the receiver node. +// If the receiver node is not a directory, a "Not A Directory" error will be returned. +func (n *node) List() ([]*node, *etcdErr.Error) { + if !n.IsDir() { + return nil, etcdErr.NewError(etcdErr.EcodeNotDir, "", n.store.CurrentIndex) + } + + nodes := make([]*node, len(n.Children)) + + i := 0 + for _, node := range n.Children { + nodes[i] = node + i++ + } + + return nodes, nil +} + +// GetChild function returns the child node under the directory node. 
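+// If no child with the given name exists, it returns nil with a nil error.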
+// On success, it returns the file node +func (n *node) GetChild(name string) (*node, *etcdErr.Error) { + if !n.IsDir() { + return nil, etcdErr.NewError(etcdErr.EcodeNotDir, n.Path, n.store.CurrentIndex) + } + + child, ok := n.Children[name] + + if ok { + return child, nil + } + + return nil, nil +} + +// Add function adds a node to the receiver node. +// If the receiver is not a directory, a "Not A Directory" error will be returned. +// If there is an existing node with the same name under the directory, a "Already Exist" +// error will be returned +func (n *node) Add(child *node) *etcdErr.Error { + if !n.IsDir() { + return etcdErr.NewError(etcdErr.EcodeNotDir, "", n.store.CurrentIndex) + } + + _, name := path.Split(child.Path) + + if _, ok := n.Children[name]; ok { + return etcdErr.NewError(etcdErr.EcodeNodeExist, "", n.store.CurrentIndex) + } + + n.Children[name] = child + + return nil +} + +// Remove function remove the node. +func (n *node) Remove(dir, recursive bool, callback func(path string)) *etcdErr.Error { + if !n.IsDir() { // key-value pair + _, name := path.Split(n.Path) + + // find its parent and remove the node from the map + if n.Parent != nil && n.Parent.Children[name] == n { + delete(n.Parent.Children, name) + } + + if callback != nil { + callback(n.Path) + } + + if !n.IsPermanent() { + n.store.ttlKeyHeap.remove(n) + } + + return nil + } + + if !dir { + // cannot delete a directory without dir set to true + return etcdErr.NewError(etcdErr.EcodeNotFile, n.Path, n.store.CurrentIndex) + } + + if len(n.Children) != 0 && !recursive { + // cannot delete a directory if it is not empty and the operation + // is not recursive + return etcdErr.NewError(etcdErr.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex) + } + + for _, child := range n.Children { // delete all children + child.Remove(true, true, callback) + } + + // delete self + _, name := path.Split(n.Path) + if n.Parent != nil && n.Parent.Children[name] == n { + delete(n.Parent.Children, name) + + if callback != nil { + callback(n.Path) + } + + if !n.IsPermanent() { + n.store.ttlKeyHeap.remove(n) + } + } + + return nil +} + +func (n *node) Repr(recursive, sorted bool, clock clockwork.Clock) *NodeExtern { + if n.IsDir() { + node := &NodeExtern{ + Key: n.Path, + Dir: true, + ModifiedIndex: n.ModifiedIndex, + CreatedIndex: n.CreatedIndex, + } + node.Expiration, node.TTL = n.expirationAndTTL(clock) + + if !recursive { + return node + } + + children, _ := n.List() + node.Nodes = make(NodeExterns, len(children)) + + // we do not use the index in the children slice directly + // we need to skip the hidden one + i := 0 + + for _, child := range children { + + if child.IsHidden() { // get will not list hidden node + continue + } + + node.Nodes[i] = child.Repr(recursive, sorted, clock) + + i++ + } + + // eliminate hidden nodes + node.Nodes = node.Nodes[:i] + if sorted { + sort.Sort(node.Nodes) + } + + return node + } + + // since n.Value could be changed later, so we need to copy the value out + value := n.Value + node := &NodeExtern{ + Key: n.Path, + Value: &value, + ModifiedIndex: n.ModifiedIndex, + CreatedIndex: n.CreatedIndex, + } + node.Expiration, node.TTL = n.expirationAndTTL(clock) + return node +} + +func (n *node) UpdateTTL(expireTime time.Time) { + if !n.IsPermanent() { + if expireTime.IsZero() { + // from ttl to permanent + n.ExpireTime = expireTime + // remove from ttl heap + n.store.ttlKeyHeap.remove(n) + return + } + + // update ttl + n.ExpireTime = expireTime + // update ttl heap + n.store.ttlKeyHeap.update(n) + return 
+	}
+
+	if expireTime.IsZero() {
+		return
+	}
+
+	// from permanent to ttl
+	n.ExpireTime = expireTime
+	// push into ttl heap
+	n.store.ttlKeyHeap.push(n)
+}
+
+// Compare function compares the node's index and value with the provided ones.
+// The second return value explains the result and equals one of the Compare... constants.
+func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) {
+	indexMatch := (prevIndex == 0 || n.ModifiedIndex == prevIndex)
+	valueMatch := (prevValue == "" || n.Value == prevValue)
+	ok = valueMatch && indexMatch
+	switch {
+	case valueMatch && indexMatch:
+		which = CompareMatch
+	case indexMatch && !valueMatch:
+		which = CompareValueNotMatch
+	case valueMatch && !indexMatch:
+		which = CompareIndexNotMatch
+	default:
+		which = CompareNotMatch
+	}
+	return
+}
+
+// Clone function clones the node recursively and returns the new node.
+// If the node is a directory, it will clone all the content under this directory.
+// If the node is a key-value pair, it will clone the pair.
+func (n *node) Clone() *node {
+	if !n.IsDir() {
+		newkv := newKV(n.store, n.Path, n.Value, n.CreatedIndex, n.Parent, n.ExpireTime)
+		newkv.ModifiedIndex = n.ModifiedIndex
+		return newkv
+	}
+
+	clone := newDir(n.store, n.Path, n.CreatedIndex, n.Parent, n.ExpireTime)
+	clone.ModifiedIndex = n.ModifiedIndex
+
+	for key, child := range n.Children {
+		clone.Children[key] = child.Clone()
+	}
+
+	return clone
+}
+
+// recoverAndclean function helps to do recovery.
+// Two things need to be done: 1. recover the structure; 2. delete expired nodes.
+//
+// If the node is a directory, it will help recover the children's parent pointers and
+// recursively call this function on its children.
+// We check expiration last since we need to recover the whole structure first and add all the
+// notifications into the event history.
+func (n *node) recoverAndclean() {
+	if n.IsDir() {
+		for _, child := range n.Children {
+			child.Parent = n
+			child.store = n.store
+			child.recoverAndclean()
+		}
+	}
+
+	if !n.ExpireTime.IsZero() {
+		n.store.ttlKeyHeap.push(n)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/store/node_extern.go b/vendor/github.com/coreos/etcd/store/node_extern.go
new file mode 100644
index 000000000000..7ba870cbe7b8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/node_extern.go
@@ -0,0 +1,116 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
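+
+// NodeExtern (defined in this file) is the external JSON representation of a
+// node. As an illustrative sketch (hand-written, not captured from a live
+// server), a file node at /foo with a 10s TTL marshals roughly as:
+//
+//	{"key":"/foo","value":"bar","expiration":"2018-01-01T00:00:10Z",
+//	 "ttl":10,"modifiedIndex":7,"createdIndex":7}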
+ +package store + +import ( + "sort" + "time" + + "github.com/jonboulle/clockwork" +) + +// NodeExtern is the external representation of the +// internal node with additional fields +// PrevValue is the previous value of the node +// TTL is time to live in second +type NodeExtern struct { + Key string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + Dir bool `json:"dir,omitempty"` + Expiration *time.Time `json:"expiration,omitempty"` + TTL int64 `json:"ttl,omitempty"` + Nodes NodeExterns `json:"nodes,omitempty"` + ModifiedIndex uint64 `json:"modifiedIndex,omitempty"` + CreatedIndex uint64 `json:"createdIndex,omitempty"` +} + +func (eNode *NodeExtern) loadInternalNode(n *node, recursive, sorted bool, clock clockwork.Clock) { + if n.IsDir() { // node is a directory + eNode.Dir = true + + children, _ := n.List() + eNode.Nodes = make(NodeExterns, len(children)) + + // we do not use the index in the children slice directly + // we need to skip the hidden one + i := 0 + + for _, child := range children { + if child.IsHidden() { // get will not return hidden nodes + continue + } + + eNode.Nodes[i] = child.Repr(recursive, sorted, clock) + i++ + } + + // eliminate hidden nodes + eNode.Nodes = eNode.Nodes[:i] + + if sorted { + sort.Sort(eNode.Nodes) + } + + } else { // node is a file + value, _ := n.Read() + eNode.Value = &value + } + + eNode.Expiration, eNode.TTL = n.expirationAndTTL(clock) +} + +func (eNode *NodeExtern) Clone() *NodeExtern { + if eNode == nil { + return nil + } + nn := &NodeExtern{ + Key: eNode.Key, + Dir: eNode.Dir, + TTL: eNode.TTL, + ModifiedIndex: eNode.ModifiedIndex, + CreatedIndex: eNode.CreatedIndex, + } + if eNode.Value != nil { + s := *eNode.Value + nn.Value = &s + } + if eNode.Expiration != nil { + t := *eNode.Expiration + nn.Expiration = &t + } + if eNode.Nodes != nil { + nn.Nodes = make(NodeExterns, len(eNode.Nodes)) + for i, n := range eNode.Nodes { + nn.Nodes[i] = n.Clone() + } + } + return nn +} + +type NodeExterns []*NodeExtern + +// interfaces for sorting + +func (ns NodeExterns) Len() int { + return len(ns) +} + +func (ns NodeExterns) Less(i, j int) bool { + return ns[i].Key < ns[j].Key +} + +func (ns NodeExterns) Swap(i, j int) { + ns[i], ns[j] = ns[j], ns[i] +} diff --git a/vendor/github.com/coreos/etcd/store/stats.go b/vendor/github.com/coreos/etcd/store/stats.go new file mode 100644 index 000000000000..59b45f2b8b3b --- /dev/null +++ b/vendor/github.com/coreos/etcd/store/stats.go @@ -0,0 +1,146 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
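+
+// Stats (defined in this file) counts v2 store operations and is serialized by
+// JsonStats in store.go. A rough sketch of the output, with invented counts:
+//
+//	{"getsSuccess":42,"getsFail":3,"setsSuccess":10,"setsFail":0,
+//	 "expireCount":1,"watchers":2, ...}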
+ +package store + +import ( + "encoding/json" + "sync/atomic" +) + +const ( + SetSuccess = iota + SetFail + DeleteSuccess + DeleteFail + CreateSuccess + CreateFail + UpdateSuccess + UpdateFail + CompareAndSwapSuccess + CompareAndSwapFail + GetSuccess + GetFail + ExpireCount + CompareAndDeleteSuccess + CompareAndDeleteFail +) + +type Stats struct { + + // Number of get requests + + GetSuccess uint64 `json:"getsSuccess"` + GetFail uint64 `json:"getsFail"` + + // Number of sets requests + + SetSuccess uint64 `json:"setsSuccess"` + SetFail uint64 `json:"setsFail"` + + // Number of delete requests + + DeleteSuccess uint64 `json:"deleteSuccess"` + DeleteFail uint64 `json:"deleteFail"` + + // Number of update requests + + UpdateSuccess uint64 `json:"updateSuccess"` + UpdateFail uint64 `json:"updateFail"` + + // Number of create requests + + CreateSuccess uint64 `json:"createSuccess"` + CreateFail uint64 `json:"createFail"` + + // Number of testAndSet requests + + CompareAndSwapSuccess uint64 `json:"compareAndSwapSuccess"` + CompareAndSwapFail uint64 `json:"compareAndSwapFail"` + + // Number of compareAndDelete requests + + CompareAndDeleteSuccess uint64 `json:"compareAndDeleteSuccess"` + CompareAndDeleteFail uint64 `json:"compareAndDeleteFail"` + + ExpireCount uint64 `json:"expireCount"` + + Watchers uint64 `json:"watchers"` +} + +func newStats() *Stats { + s := new(Stats) + return s +} + +func (s *Stats) clone() *Stats { + return &Stats{ + GetSuccess: s.GetSuccess, + GetFail: s.GetFail, + SetSuccess: s.SetSuccess, + SetFail: s.SetFail, + DeleteSuccess: s.DeleteSuccess, + DeleteFail: s.DeleteFail, + UpdateSuccess: s.UpdateSuccess, + UpdateFail: s.UpdateFail, + CreateSuccess: s.CreateSuccess, + CreateFail: s.CreateFail, + CompareAndSwapSuccess: s.CompareAndSwapSuccess, + CompareAndSwapFail: s.CompareAndSwapFail, + CompareAndDeleteSuccess: s.CompareAndDeleteSuccess, + CompareAndDeleteFail: s.CompareAndDeleteFail, + ExpireCount: s.ExpireCount, + Watchers: s.Watchers, + } +} + +func (s *Stats) toJson() []byte { + b, _ := json.Marshal(s) + return b +} + +func (s *Stats) Inc(field int) { + switch field { + case SetSuccess: + atomic.AddUint64(&s.SetSuccess, 1) + case SetFail: + atomic.AddUint64(&s.SetFail, 1) + case CreateSuccess: + atomic.AddUint64(&s.CreateSuccess, 1) + case CreateFail: + atomic.AddUint64(&s.CreateFail, 1) + case DeleteSuccess: + atomic.AddUint64(&s.DeleteSuccess, 1) + case DeleteFail: + atomic.AddUint64(&s.DeleteFail, 1) + case GetSuccess: + atomic.AddUint64(&s.GetSuccess, 1) + case GetFail: + atomic.AddUint64(&s.GetFail, 1) + case UpdateSuccess: + atomic.AddUint64(&s.UpdateSuccess, 1) + case UpdateFail: + atomic.AddUint64(&s.UpdateFail, 1) + case CompareAndSwapSuccess: + atomic.AddUint64(&s.CompareAndSwapSuccess, 1) + case CompareAndSwapFail: + atomic.AddUint64(&s.CompareAndSwapFail, 1) + case CompareAndDeleteSuccess: + atomic.AddUint64(&s.CompareAndDeleteSuccess, 1) + case CompareAndDeleteFail: + atomic.AddUint64(&s.CompareAndDeleteFail, 1) + case ExpireCount: + atomic.AddUint64(&s.ExpireCount, 1) + } +} diff --git a/vendor/github.com/coreos/etcd/store/store.go b/vendor/github.com/coreos/etcd/store/store.go new file mode 100644 index 000000000000..edf7f21942b9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/store/store.go @@ -0,0 +1,791 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package store + +import ( + "encoding/json" + "fmt" + "path" + "strconv" + "strings" + "sync" + "time" + + etcdErr "github.com/coreos/etcd/error" + "github.com/coreos/etcd/pkg/types" + "github.com/jonboulle/clockwork" +) + +// The default version to set when the store is first initialized. +const defaultVersion = 2 + +var minExpireTime time.Time + +func init() { + minExpireTime, _ = time.Parse(time.RFC3339, "2000-01-01T00:00:00Z") +} + +type Store interface { + Version() int + Index() uint64 + + Get(nodePath string, recursive, sorted bool) (*Event, error) + Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) + Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) + Create(nodePath string, dir bool, value string, unique bool, + expireOpts TTLOptionSet) (*Event, error) + CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, + value string, expireOpts TTLOptionSet) (*Event, error) + Delete(nodePath string, dir, recursive bool) (*Event, error) + CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) + + Watch(prefix string, recursive, stream bool, sinceIndex uint64) (Watcher, error) + + Save() ([]byte, error) + Recovery(state []byte) error + + Clone() Store + SaveNoCopy() ([]byte, error) + + JsonStats() []byte + DeleteExpiredKeys(cutoff time.Time) + + HasTTLKeys() bool +} + +type TTLOptionSet struct { + ExpireTime time.Time + Refresh bool +} + +type store struct { + Root *node + WatcherHub *watcherHub + CurrentIndex uint64 + Stats *Stats + CurrentVersion int + ttlKeyHeap *ttlKeyHeap // need to recovery manually + worldLock sync.RWMutex // stop the world lock + clock clockwork.Clock + readonlySet types.Set +} + +// New creates a store where the given namespaces will be created as initial directories. +func New(namespaces ...string) Store { + s := newStore(namespaces...) + s.clock = clockwork.NewRealClock() + return s +} + +func newStore(namespaces ...string) *store { + s := new(store) + s.CurrentVersion = defaultVersion + s.Root = newDir(s, "/", s.CurrentIndex, nil, Permanent) + for _, namespace := range namespaces { + s.Root.Add(newDir(s, namespace, s.CurrentIndex, s.Root, Permanent)) + } + s.Stats = newStats() + s.WatcherHub = newWatchHub(1000) + s.ttlKeyHeap = newTtlKeyHeap() + s.readonlySet = types.NewUnsafeSet(append(namespaces, "/")...) + return s +} + +// Version retrieves current version of the store. +func (s *store) Version() int { + return s.CurrentVersion +} + +// Index retrieves the current index of the store. +func (s *store) Index() uint64 { + s.worldLock.RLock() + defer s.worldLock.RUnlock() + return s.CurrentIndex +} + +// Get returns a get event. +// If recursive is true, it will return all the content under the node path. +// If sorted is true, it will sort the content by keys. 
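+//
+// A minimal usage sketch (values invented for illustration):
+//
+//	s := New()
+//	s.Set("/foo", false, "bar", TTLOptionSet{ExpireTime: Permanent})
+//	ev, err := s.Get("/foo", false, false)
+//	if err == nil {
+//		fmt.Println(*ev.Node.Value) // prints "bar"
+//	}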
+func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) { + var err *etcdErr.Error + + s.worldLock.RLock() + defer s.worldLock.RUnlock() + + defer func() { + if err == nil { + s.Stats.Inc(GetSuccess) + if recursive { + reportReadSuccess(GetRecursive) + } else { + reportReadSuccess(Get) + } + return + } + + s.Stats.Inc(GetFail) + if recursive { + reportReadFailure(GetRecursive) + } else { + reportReadFailure(Get) + } + }() + + n, err := s.internalGet(nodePath) + if err != nil { + return nil, err + } + + e := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex) + e.EtcdIndex = s.CurrentIndex + e.Node.loadInternalNode(n, recursive, sorted, s.clock) + + return e, nil +} + +// Create creates the node at nodePath. Create will help to create intermediate directories with no ttl. +// If the node has already existed, create will fail. +// If any node on the path is a file, create will fail. +func (s *store) Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) { + var err *etcdErr.Error + + s.worldLock.Lock() + defer s.worldLock.Unlock() + + defer func() { + if err == nil { + s.Stats.Inc(CreateSuccess) + reportWriteSuccess(Create) + return + } + + s.Stats.Inc(CreateFail) + reportWriteFailure(Create) + }() + + e, err := s.internalCreate(nodePath, dir, value, unique, false, expireOpts.ExpireTime, Create) + if err != nil { + return nil, err + } + + e.EtcdIndex = s.CurrentIndex + s.WatcherHub.notify(e) + + return e, nil +} + +// Set creates or replace the node at nodePath. +func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) { + var err *etcdErr.Error + + s.worldLock.Lock() + defer s.worldLock.Unlock() + + defer func() { + if err == nil { + s.Stats.Inc(SetSuccess) + reportWriteSuccess(Set) + return + } + + s.Stats.Inc(SetFail) + reportWriteFailure(Set) + }() + + // Get prevNode value + n, getErr := s.internalGet(nodePath) + if getErr != nil && getErr.ErrorCode != etcdErr.EcodeKeyNotFound { + err = getErr + return nil, err + } + + if expireOpts.Refresh { + if getErr != nil { + err = getErr + return nil, err + } else { + value = n.Value + } + } + + // Set new value + e, err := s.internalCreate(nodePath, dir, value, false, true, expireOpts.ExpireTime, Set) + if err != nil { + return nil, err + } + e.EtcdIndex = s.CurrentIndex + + // Put prevNode into event + if getErr == nil { + prev := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex) + prev.Node.loadInternalNode(n, false, false, s.clock) + e.PrevNode = prev.Node + } + + if !expireOpts.Refresh { + s.WatcherHub.notify(e) + } else { + e.SetRefresh() + s.WatcherHub.add(e) + } + + return e, nil +} + +// returns user-readable cause of failed comparison +func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64) string { + switch which { + case CompareIndexNotMatch: + return fmt.Sprintf("[%v != %v]", prevIndex, n.ModifiedIndex) + case CompareValueNotMatch: + return fmt.Sprintf("[%v != %v]", prevValue, n.Value) + default: + return fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, n.Value, prevIndex, n.ModifiedIndex) + } +} + +func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, + value string, expireOpts TTLOptionSet) (*Event, error) { + + var err *etcdErr.Error + + s.worldLock.Lock() + defer s.worldLock.Unlock() + + defer func() { + if err == nil { + s.Stats.Inc(CompareAndSwapSuccess) + reportWriteSuccess(CompareAndSwap) + return + } + + s.Stats.Inc(CompareAndSwapFail) + 
reportWriteFailure(CompareAndSwap) + }() + + nodePath = path.Clean(path.Join("/", nodePath)) + // we do not allow the user to change "/" + if s.readonlySet.Contains(nodePath) { + return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex) + } + + n, err := s.internalGet(nodePath) + if err != nil { + return nil, err + } + if n.IsDir() { // can only compare and swap file + err = etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, s.CurrentIndex) + return nil, err + } + + // If both of the prevValue and prevIndex are given, we will test both of them. + // Command will be executed, only if both of the tests are successful. + if ok, which := n.Compare(prevValue, prevIndex); !ok { + cause := getCompareFailCause(n, which, prevValue, prevIndex) + err = etcdErr.NewError(etcdErr.EcodeTestFailed, cause, s.CurrentIndex) + return nil, err + } + + if expireOpts.Refresh { + value = n.Value + } + + // update etcd index + s.CurrentIndex++ + + e := newEvent(CompareAndSwap, nodePath, s.CurrentIndex, n.CreatedIndex) + e.EtcdIndex = s.CurrentIndex + e.PrevNode = n.Repr(false, false, s.clock) + eNode := e.Node + + // if test succeed, write the value + n.Write(value, s.CurrentIndex) + n.UpdateTTL(expireOpts.ExpireTime) + + // copy the value for safety + valueCopy := value + eNode.Value = &valueCopy + eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) + + if !expireOpts.Refresh { + s.WatcherHub.notify(e) + } else { + e.SetRefresh() + s.WatcherHub.add(e) + } + + return e, nil +} + +// Delete deletes the node at the given path. +// If the node is a directory, recursive must be true to delete it. +func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) { + var err *etcdErr.Error + + s.worldLock.Lock() + defer s.worldLock.Unlock() + + defer func() { + if err == nil { + s.Stats.Inc(DeleteSuccess) + reportWriteSuccess(Delete) + return + } + + s.Stats.Inc(DeleteFail) + reportWriteFailure(Delete) + }() + + nodePath = path.Clean(path.Join("/", nodePath)) + // we do not allow the user to change "/" + if s.readonlySet.Contains(nodePath) { + return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex) + } + + // recursive implies dir + if recursive { + dir = true + } + + n, err := s.internalGet(nodePath) + if err != nil { // if the node does not exist, return error + return nil, err + } + + nextIndex := s.CurrentIndex + 1 + e := newEvent(Delete, nodePath, nextIndex, n.CreatedIndex) + e.EtcdIndex = nextIndex + e.PrevNode = n.Repr(false, false, s.clock) + eNode := e.Node + + if n.IsDir() { + eNode.Dir = true + } + + callback := func(path string) { // notify function + // notify the watchers with deleted set true + s.WatcherHub.notifyWatchers(e, path, true) + } + + err = n.Remove(dir, recursive, callback) + if err != nil { + return nil, err + } + + // update etcd index + s.CurrentIndex++ + + s.WatcherHub.notify(e) + + return e, nil +} + +func (s *store) CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) { + var err *etcdErr.Error + + s.worldLock.Lock() + defer s.worldLock.Unlock() + + defer func() { + if err == nil { + s.Stats.Inc(CompareAndDeleteSuccess) + reportWriteSuccess(CompareAndDelete) + return + } + + s.Stats.Inc(CompareAndDeleteFail) + reportWriteFailure(CompareAndDelete) + }() + + nodePath = path.Clean(path.Join("/", nodePath)) + + n, err := s.internalGet(nodePath) + if err != nil { // if the node does not exist, return error + return nil, err + } + if n.IsDir() { // can only compare and delete file + return nil, 
etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, s.CurrentIndex) + } + + // If both of the prevValue and prevIndex are given, we will test both of them. + // Command will be executed, only if both of the tests are successful. + if ok, which := n.Compare(prevValue, prevIndex); !ok { + cause := getCompareFailCause(n, which, prevValue, prevIndex) + return nil, etcdErr.NewError(etcdErr.EcodeTestFailed, cause, s.CurrentIndex) + } + + // update etcd index + s.CurrentIndex++ + + e := newEvent(CompareAndDelete, nodePath, s.CurrentIndex, n.CreatedIndex) + e.EtcdIndex = s.CurrentIndex + e.PrevNode = n.Repr(false, false, s.clock) + + callback := func(path string) { // notify function + // notify the watchers with deleted set true + s.WatcherHub.notifyWatchers(e, path, true) + } + + err = n.Remove(false, false, callback) + if err != nil { + return nil, err + } + + s.WatcherHub.notify(e) + + return e, nil +} + +func (s *store) Watch(key string, recursive, stream bool, sinceIndex uint64) (Watcher, error) { + s.worldLock.RLock() + defer s.worldLock.RUnlock() + + key = path.Clean(path.Join("/", key)) + if sinceIndex == 0 { + sinceIndex = s.CurrentIndex + 1 + } + // WatcherHub does not know about the current index, so we need to pass it in + w, err := s.WatcherHub.watch(key, recursive, stream, sinceIndex, s.CurrentIndex) + if err != nil { + return nil, err + } + + return w, nil +} + +// walk walks all the nodePath and apply the walkFunc on each directory +func (s *store) walk(nodePath string, walkFunc func(prev *node, component string) (*node, *etcdErr.Error)) (*node, *etcdErr.Error) { + components := strings.Split(nodePath, "/") + + curr := s.Root + var err *etcdErr.Error + + for i := 1; i < len(components); i++ { + if len(components[i]) == 0 { // ignore empty string + return curr, nil + } + + curr, err = walkFunc(curr, components[i]) + if err != nil { + return nil, err + } + } + + return curr, nil +} + +// Update updates the value/ttl of the node. +// If the node is a file, the value and the ttl can be updated. +// If the node is a directory, only the ttl can be updated. 
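+//
+// For example (a sketch, with an invented 10s TTL), refreshing a key's TTL
+// without changing its value:
+//
+//	exp := time.Now().Add(10 * time.Second)
+//	s.Update("/foo", "", TTLOptionSet{ExpireTime: exp, Refresh: true})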
+func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) {
+	var err *etcdErr.Error
+
+	s.worldLock.Lock()
+	defer s.worldLock.Unlock()
+
+	defer func() {
+		if err == nil {
+			s.Stats.Inc(UpdateSuccess)
+			reportWriteSuccess(Update)
+			return
+		}
+
+		s.Stats.Inc(UpdateFail)
+		reportWriteFailure(Update)
+	}()
+
+	nodePath = path.Clean(path.Join("/", nodePath))
+	// we do not allow the user to change "/"
+	if s.readonlySet.Contains(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex)
+	}
+
+	currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
+
+	n, err := s.internalGet(nodePath)
+	if err != nil { // if the node does not exist, return error
+		return nil, err
+	}
+	if n.IsDir() && len(newValue) != 0 {
+		// if the node is a directory, we cannot update value to non-empty
+		return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, currIndex)
+	}
+
+	if expireOpts.Refresh {
+		newValue = n.Value
+	}
+
+	e := newEvent(Update, nodePath, nextIndex, n.CreatedIndex)
+	e.EtcdIndex = nextIndex
+	e.PrevNode = n.Repr(false, false, s.clock)
+	eNode := e.Node
+
+	n.Write(newValue, nextIndex)
+
+	if n.IsDir() {
+		eNode.Dir = true
+	} else {
+		// copy the value for safety
+		newValueCopy := newValue
+		eNode.Value = &newValueCopy
+	}
+
+	// update ttl
+	n.UpdateTTL(expireOpts.ExpireTime)
+
+	eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
+
+	if !expireOpts.Refresh {
+		s.WatcherHub.notify(e)
+	} else {
+		e.SetRefresh()
+		s.WatcherHub.add(e)
+	}
+
+	s.CurrentIndex = nextIndex
+
+	return e, nil
+}
+
+func (s *store) internalCreate(nodePath string, dir bool, value string, unique, replace bool,
+	expireTime time.Time, action string) (*Event, *etcdErr.Error) {
+
+	currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
+
+	if unique { // append unique item under the node path
+		nodePath += "/" + fmt.Sprintf("%020s", strconv.FormatUint(nextIndex, 10))
+	}
+
+	nodePath = path.Clean(path.Join("/", nodePath))
+
+	// we do not allow the user to change "/"
+	if s.readonlySet.Contains(nodePath) {
+		return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", currIndex)
+	}
+
+	// Assume expire times that are way in the past are invalid.
+	// This can occur when the time is serialized to JS.
+	if expireTime.Before(minExpireTime) {
+		expireTime = Permanent
+	}
+
+	dirName, nodeName := path.Split(nodePath)
+
+	// walk through the nodePath, create dirs and get the last directory node
+	d, err := s.walk(dirName, s.checkDir)
+
+	if err != nil {
+		s.Stats.Inc(SetFail)
+		reportWriteFailure(action)
+		err.Index = currIndex
+		return nil, err
+	}
+
+	e := newEvent(action, nodePath, nextIndex, nextIndex)
+	eNode := e.Node
+
+	n, _ := d.GetChild(nodeName)
+
+	// replace will try to replace an existing file
+	if n != nil {
+		if replace {
+			if n.IsDir() {
+				return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, currIndex)
+			}
+			e.PrevNode = n.Repr(false, false, s.clock)
+
+			n.Remove(false, false, nil)
+		} else {
+			return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, currIndex)
+		}
+	}
+
+	if !dir { // create file
+		// copy the value for safety
+		valueCopy := value
+		eNode.Value = &valueCopy
+
+		n = newKV(s, nodePath, value, nextIndex, d, expireTime)
+
+	} else { // create directory
+		eNode.Dir = true
+
+		n = newDir(s, nodePath, nextIndex, d, expireTime)
+	}
+
+	// we are sure d is a directory and does not have a child with the same name
+	d.Add(n)
+
+	// node with TTL
+	if !n.IsPermanent() {
+		s.ttlKeyHeap.push(n)
+
+		eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
+	}
+
+	s.CurrentIndex = nextIndex
+
+	return e, nil
+}
+
+// internalGet gets the node of the given nodePath.
+func (s *store) internalGet(nodePath string) (*node, *etcdErr.Error) {
+	nodePath = path.Clean(path.Join("/", nodePath))
+
+	walkFunc := func(parent *node, name string) (*node, *etcdErr.Error) {
+
+		if !parent.IsDir() {
+			err := etcdErr.NewError(etcdErr.EcodeNotDir, parent.Path, s.CurrentIndex)
+			return nil, err
+		}
+
+		child, ok := parent.Children[name]
+		if ok {
+			return child, nil
+		}
+
+		return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex)
+	}
+
+	f, err := s.walk(nodePath, walkFunc)
+
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// DeleteExpiredKeys will delete all expired keys
+func (s *store) DeleteExpiredKeys(cutoff time.Time) {
+	s.worldLock.Lock()
+	defer s.worldLock.Unlock()
+
+	for {
+		node := s.ttlKeyHeap.top()
+		if node == nil || node.ExpireTime.After(cutoff) {
+			break
+		}
+
+		s.CurrentIndex++
+		e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex)
+		e.EtcdIndex = s.CurrentIndex
+		e.PrevNode = node.Repr(false, false, s.clock)
+		if node.IsDir() {
+			e.Node.Dir = true
+		}
+
+		callback := func(path string) { // notify function
+			// notify the watchers with deleted set true
+			s.WatcherHub.notifyWatchers(e, path, true)
+		}
+
+		s.ttlKeyHeap.pop()
+		node.Remove(true, true, callback)
+
+		reportExpiredKey()
+		s.Stats.Inc(ExpireCount)
+
+		s.WatcherHub.notify(e)
+	}
+
+}
+
+// checkDir will check whether the component is a directory under the parent node.
+// If it is a directory, this function will return the pointer to that node.
+// If it does not exist, this function will create a new directory and return the pointer to that node.
+// If it is a file, this function will return an error.
+func (s *store) checkDir(parent *node, dirName string) (*node, *etcdErr.Error) {
+	node, ok := parent.Children[dirName]
+
+	if ok {
+		if node.IsDir() {
+			return node, nil
+		}
+
+		return nil, etcdErr.NewError(etcdErr.EcodeNotDir, node.Path, s.CurrentIndex)
+	}
+
+	n := newDir(s, path.Join(parent.Path, dirName), s.CurrentIndex+1, parent, Permanent)
+
+	parent.Children[dirName] = n
+
+	return n, nil
+}
+
+// Save saves the static state of the store system.
+// It will not be able to save the state of watchers.
+// It will not save the parent field of the node; otherwise there will
+// be cyclic dependency issues with the json package.
+func (s *store) Save() ([]byte, error) {
+	b, err := json.Marshal(s.Clone())
+	if err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+func (s *store) SaveNoCopy() ([]byte, error) {
+	b, err := json.Marshal(s)
+	if err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+func (s *store) Clone() Store {
+	s.worldLock.Lock()
+
+	clonedStore := newStore()
+	clonedStore.CurrentIndex = s.CurrentIndex
+	clonedStore.Root = s.Root.Clone()
+	clonedStore.WatcherHub = s.WatcherHub.clone()
+	clonedStore.Stats = s.Stats.clone()
+	clonedStore.CurrentVersion = s.CurrentVersion
+
+	s.worldLock.Unlock()
+	return clonedStore
+}
+
+// Recovery recovers the store system from a static state.
+// It needs to recover the parent field of the nodes.
+// It needs to delete the expired nodes since the saved time and also
+// needs to create monitoring goroutines.
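+//
+// A save/restore round trip is therefore (a sketch; error handling elided):
+//
+//	b, _ := old.Save()
+//	restored := New()
+//	_ = restored.Recovery(b)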
+func (s *store) Recovery(state []byte) error {
+	s.worldLock.Lock()
+	defer s.worldLock.Unlock()
+	err := json.Unmarshal(state, s)
+
+	if err != nil {
+		return err
+	}
+
+	s.ttlKeyHeap = newTtlKeyHeap()
+
+	s.Root.recoverAndclean()
+	return nil
+}
+
+func (s *store) JsonStats() []byte {
+	s.Stats.Watchers = uint64(s.WatcherHub.count)
+	return s.Stats.toJson()
+}
+
+func (s *store) HasTTLKeys() bool {
+	s.worldLock.RLock()
+	defer s.worldLock.RUnlock()
+	return s.ttlKeyHeap.Len() != 0
+}
diff --git a/vendor/github.com/coreos/etcd/store/ttl_key_heap.go b/vendor/github.com/coreos/etcd/store/ttl_key_heap.go
new file mode 100644
index 000000000000..21ae9b7c6999
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/ttl_key_heap.go
@@ -0,0 +1,99 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+import (
+	"container/heap"
+)
+
+// A ttlKeyHeap is a min-heap of TTL keys ordered by expiration time.
+type ttlKeyHeap struct {
+	array  []*node
+	keyMap map[*node]int
+}
+
+func newTtlKeyHeap() *ttlKeyHeap {
+	h := &ttlKeyHeap{keyMap: make(map[*node]int)}
+	heap.Init(h)
+	return h
+}
+
+func (h ttlKeyHeap) Len() int {
+	return len(h.array)
+}
+
+func (h ttlKeyHeap) Less(i, j int) bool {
+	return h.array[i].ExpireTime.Before(h.array[j].ExpireTime)
+}
+
+func (h ttlKeyHeap) Swap(i, j int) {
+	// swap nodes
+	h.array[i], h.array[j] = h.array[j], h.array[i]
+
+	// update map
+	h.keyMap[h.array[i]] = i
+	h.keyMap[h.array[j]] = j
+}
+
+func (h *ttlKeyHeap) Push(x interface{}) {
+	n, _ := x.(*node)
+	h.keyMap[n] = len(h.array)
+	h.array = append(h.array, n)
+}
+
+func (h *ttlKeyHeap) Pop() interface{} {
+	old := h.array
+	n := len(old)
+	x := old[n-1]
+	// Set the slice element to nil so the GC can recycle the node.
+	// This is needed because the Go GC does not support partial recycling:
+	// https://github.com/golang/go/issues/9618
+	old[n-1] = nil
+	h.array = old[0 : n-1]
+	delete(h.keyMap, x)
+	return x
+}
+
+func (h *ttlKeyHeap) top() *node {
+	if h.Len() != 0 {
+		return h.array[0]
+	}
+	return nil
+}
+
+func (h *ttlKeyHeap) pop() *node {
+	x := heap.Pop(h)
+	n, _ := x.(*node)
+	return n
+}
+
+func (h *ttlKeyHeap) push(x interface{}) {
+	heap.Push(h, x)
+}
+
+func (h *ttlKeyHeap) update(n *node) {
+	index, ok := h.keyMap[n]
+	if ok {
+		heap.Remove(h, index)
+		heap.Push(h, n)
+	}
+}
+
+func (h *ttlKeyHeap) remove(n *node) {
+	index, ok := h.keyMap[n]
+	if ok {
+		heap.Remove(h, index)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/store/watcher.go b/vendor/github.com/coreos/etcd/store/watcher.go
new file mode 100644
index 000000000000..a236ec777037
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/watcher.go
@@ -0,0 +1,95 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+type Watcher interface {
+	EventChan() chan *Event
+	StartIndex() uint64 // The EtcdIndex at which the Watcher was created
+	Remove()
+}
+
+type watcher struct {
+	eventChan  chan *Event
+	stream     bool
+	recursive  bool
+	sinceIndex uint64
+	startIndex uint64
+	hub        *watcherHub
+	removed    bool
+	remove     func()
+}
+
+func (w *watcher) EventChan() chan *Event {
+	return w.eventChan
+}
+
+func (w *watcher) StartIndex() uint64 {
+	return w.startIndex
+}
+
+// notify function notifies the watcher. If the watcher is interested in the given path,
+// the function will return true.
+func (w *watcher) notify(e *Event, originalPath bool, deleted bool) bool {
+	// The watcher is interested in the path in three cases, under one condition:
+	// the event must happen after the watcher's sinceIndex.
+
+	// 1. the path at which the event happens is the path the watcher is watching at.
+	// For example, if the watcher is watching at "/foo" and the event happens at "/foo",
+	// the watcher must be interested in that event.
+
+	// 2. the watcher is a recursive watcher and is interested in events below its
+	// watching path. For example, if watcher A watches at "/foo" and is a recursive
+	// one, it is interested in an event that happens at "/foo/bar".
+
+	// 3. when we delete a directory, we need to force-notify all the watchers watching
+	// the files we are about to delete.
+	// For example, a watcher is watching at "/foo/bar" and we delete "/foo". The watcher
+	// should get notified even though "/foo" is not the path it is watching.
+	if (w.recursive || originalPath || deleted) && e.Index() >= w.sinceIndex {
+		// We cannot block here if the eventChan capacity is full, otherwise
+		// etcd will hang. eventChan capacity is full when the rate of
+		// notifications is higher than our send rate.
+		// If this happens, we close the channel.
+		select {
+		case w.eventChan <- e:
+		default:
+			// We have missed a notification. Remove the watcher.
+			// Removing the watcher also closes the eventChan.
+			w.remove()
+		}
+		return true
+	}
+	return false
+}
+
+// Remove removes the watcher from the watcherHub.
+// The actual remove function is guaranteed to only be executed once.
+func (w *watcher) Remove() {
+	w.hub.mutex.Lock()
+	defer w.hub.mutex.Unlock()
+
+	close(w.eventChan)
+	if w.remove != nil {
+		w.remove()
+	}
+}
+
+// nopWatcher is a watcher that receives nothing, always blocking.
+type nopWatcher struct{}
+
+func NewNopWatcher() Watcher                 { return &nopWatcher{} }
+func (w *nopWatcher) EventChan() chan *Event { return nil }
+func (w *nopWatcher) StartIndex() uint64     { return 0 }
+func (w *nopWatcher) Remove()                {}
diff --git a/vendor/github.com/coreos/etcd/store/watcher_hub.go b/vendor/github.com/coreos/etcd/store/watcher_hub.go
new file mode 100644
index 000000000000..13c23e391d91
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/store/watcher_hub.go
@@ -0,0 +1,200 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package store
+
+import (
+	"container/list"
+	"path"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	etcdErr "github.com/coreos/etcd/error"
+)
+
+// A watcherHub contains all subscribed watchers.
+// watchers is a map with the watched path as key and watcher as value.
+// EventHistory keeps old events for the watcherHub. It is used to help
+// a watcher get a continuous event history; without it, a watcher might
+// miss an event that happens between the end of the first watch command
+// and the start of the second command.
+type watcherHub struct {
+	// count must be the first element to keep 64-bit alignment for atomic
+	// access
+
+	count int64 // current number of watchers.
+
+	mutex        sync.Mutex
+	watchers     map[string]*list.List
+	EventHistory *EventHistory
+}
+
+// newWatchHub creates a watcherHub. The capacity determines how many events we will
+// keep in the eventHistory.
+// Typically, we only need to keep a small amount of history (smaller than 20K).
+// Ideally, it should be smaller than 20K/s (max throughput) * 2 * 50ms (RTT) = 2000.
+func newWatchHub(capacity int) *watcherHub {
+	return &watcherHub{
+		watchers:     make(map[string]*list.List),
+		EventHistory: newEventHistory(capacity),
+	}
+}
+
+// watch function returns a Watcher.
+// If recursive is true, the first change after index under key will be sent to the event channel of the watcher.
+// If recursive is false, the first change after index at key will be sent to the event channel of the watcher.
+// If index is zero, watch will start from the current index + 1.
+func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *etcdErr.Error) {
+	reportWatchRequest()
+	event, err := wh.EventHistory.scan(key, recursive, index)
+
+	if err != nil {
+		err.Index = storeIndex
+		return nil, err
+	}
+
+	w := &watcher{
+		eventChan:  make(chan *Event, 100), // use a buffered channel
+		recursive:  recursive,
+		stream:     stream,
+		sinceIndex: index,
+		startIndex: storeIndex,
+		hub:        wh,
+	}
+
+	wh.mutex.Lock()
+	defer wh.mutex.Unlock()
+	// If the event exists in the known history, append the EtcdIndex and return immediately.
+	if event != nil {
+		ne := event.Clone()
+		ne.EtcdIndex = storeIndex
+		w.eventChan <- ne
+		return w, nil
+	}
+
+	l, ok := wh.watchers[key]
+
+	var elem *list.Element
+
+	if ok { // add the new watcher to the back of the list
+		elem = l.PushBack(w)
+	} else { // create a new list and add the new watcher
+		l = list.New()
+		elem = l.PushBack(w)
+		wh.watchers[key] = l
+	}
+
+	w.remove = func() {
+		if w.removed { // avoid removing it twice
+			return
+		}
+		w.removed = true
+		l.Remove(elem)
+		atomic.AddInt64(&wh.count, -1)
+		reportWatcherRemoved()
+		if l.Len() == 0 {
+			delete(wh.watchers, key)
+		}
+	}
+
+	atomic.AddInt64(&wh.count, 1)
+	reportWatcherAdded()
+
+	return w, nil
+}
+
+func (wh *watcherHub) add(e *Event) {
+	wh.EventHistory.addEvent(e)
+}
+
+// notify function accepts an event and notifies the watchers.
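+// For example, an event at "/foo/bar" is delivered by walking the path: watchers
+// registered at "/" and "/foo" are notified only if they are recursive, while
+// watchers registered at "/foo/bar" itself are notified unconditionally.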
+func (wh *watcherHub) notify(e *Event) {
+	e = wh.EventHistory.addEvent(e) // add event into the eventHistory
+
+	segments := strings.Split(e.Node.Key, "/")
+
+	currPath := "/"
+
+	// walk through all the segments of the path and notify the watchers.
+	// If the path is "/foo/bar", it will notify watchers with path "/",
+	// "/foo" and "/foo/bar".
+
+	for _, segment := range segments {
+		currPath = path.Join(currPath, segment)
+		// notify the watchers who are interested in changes at the current path
+		wh.notifyWatchers(e, currPath, false)
+	}
+}
+
+func (wh *watcherHub) notifyWatchers(e *Event, nodePath string, deleted bool) {
+	wh.mutex.Lock()
+	defer wh.mutex.Unlock()
+
+	l, ok := wh.watchers[nodePath]
+	if ok {
+		curr := l.Front()
+
+		for curr != nil {
+			next := curr.Next() // save a reference to the next one in the list
+
+			w, _ := curr.Value.(*watcher)
+
+			originalPath := (e.Node.Key == nodePath)
+			if (originalPath || !isHidden(nodePath, e.Node.Key)) && w.notify(e, originalPath, deleted) {
+				if !w.stream { // do not remove the stream watcher
+					// if we successfully notify a watcher,
+					// we need to remove the watcher from the list
+					// and decrease the counter
+					w.removed = true
+					l.Remove(curr)
+					atomic.AddInt64(&wh.count, -1)
+					reportWatcherRemoved()
+				}
+			}
+
+			curr = next // update current to the next element in the list
+		}
+
+		if l.Len() == 0 {
+			// if we have notified all watchers in the list,
+			// we can delete the list
+			delete(wh.watchers, nodePath)
+		}
+	}
+}
+
+// clone function clones the watcherHub and returns the cloned one.
+// It only clones the static content; it does not clone the current watchers.
+func (wh *watcherHub) clone() *watcherHub {
+	clonedHistory := wh.EventHistory.clone()
+
+	return &watcherHub{
+		EventHistory: clonedHistory,
+	}
+}
+
+// isHidden checks whether the key path is considered hidden with respect to the
+// watch path, i.e. the last element is hidden or it is within a hidden directory.
+func isHidden(watchPath, keyPath string) bool {
+	// When deleting a directory, watchPath might be deeper than the actual keyPath.
+	// For example, when deleting /foo we also need to notify watchers on /foo/bar.
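+	// A few hand-worked cases of the check below:
+	//
+	//	isHidden("/foo", "/foo/_bar") == true  // hidden leaf under the watch path
+	//	isHidden("/foo", "/foo/bar")  == false // visible leaf
+	//	isHidden("/", "/_config")     == true  // hidden entry at the root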
+ if len(watchPath) > len(keyPath) { + return false + } + // if watch path is just a "/", after path will start without "/" + // add a "/" to deal with the special case when watchPath is "/" + afterPath := path.Clean("/" + keyPath[len(watchPath):]) + return strings.Contains(afterPath, "/_") +} diff --git a/vendor/github.com/coreos/etcd/version/BUILD b/vendor/github.com/coreos/etcd/version/BUILD new file mode 100644 index 000000000000..803549742385 --- /dev/null +++ b/vendor/github.com/coreos/etcd/version/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["version.go"], + importpath = "github.com/coreos/etcd/version", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/coreos/go-semver/semver:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go new file mode 100644 index 000000000000..39505ba54585 --- /dev/null +++ b/vendor/github.com/coreos/etcd/version/version.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package version implements etcd version parsing and contains latest version +// information. +package version + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" +) + +var ( + // MinClusterVersion is the min cluster version this etcd binary is compatible with. + MinClusterVersion = "3.0.0" + Version = "3.2.15" + APIVersion = "unknown" + + // Git SHA Value will be set during build + GitSHA = "Not provided (use ./build instead of go build)" +) + +func init() { + ver, err := semver.NewVersion(Version) + if err == nil { + APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) + } +} + +type Versions struct { + Server string `json:"etcdserver"` + Cluster string `json:"etcdcluster"` + // TODO: raft state machine version +} + +// Cluster only keeps the major.minor. 
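+// For example, Cluster("3.2.15") returns "3.2", while Cluster("3.2") is returned unchanged.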
+func Cluster(v string) string { + vs := strings.Split(v, ".") + if len(vs) <= 2 { + return v + } + return fmt.Sprintf("%s.%s", vs[0], vs[1]) +} diff --git a/vendor/github.com/coreos/etcd/wal/BUILD b/vendor/github.com/coreos/etcd/wal/BUILD new file mode 100644 index 000000000000..ff8d471353ec --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/BUILD @@ -0,0 +1,80 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "decoder.go", + "doc.go", + "encoder.go", + "file_pipeline.go", + "metrics.go", + "repair.go", + "util.go", + "wal.go", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "wal_unix.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "wal_windows.go", + ], + "//conditions:default": [], + }), + importpath = "github.com/coreos/etcd/wal", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/coreos/etcd/pkg/crc:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/ioutil:go_default_library", + "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library", + "//vendor/github.com/coreos/etcd/raft:go_default_library", + "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", + "//vendor/github.com/coreos/etcd/wal/walpb:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coreos/etcd/wal/walpb:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/wal/decoder.go b/vendor/github.com/coreos/etcd/wal/decoder.go new file mode 100644 index 000000000000..0d9b4428c946 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/decoder.go @@ -0,0 +1,185 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
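+
+// The decoder below reads a WAL file as a stream of length-prefixed records.
+// A typical read loop looks roughly like this (a sketch; record type constants
+// such as crcType are defined elsewhere in this package):
+//
+//	dec := newDecoder(f)
+//	rec := &walpb.Record{}
+//	for dec.decode(rec) == nil {
+//		// dispatch on rec.Type; crcType records update the running checksum
+//	}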
+ +package wal + +import ( + "bufio" + "encoding/binary" + "hash" + "io" + "sync" + + "github.com/coreos/etcd/pkg/crc" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/wal/walpb" +) + +const minSectorSize = 512 + +type decoder struct { + mu sync.Mutex + brs []*bufio.Reader + + // lastValidOff file offset following the last valid decoded record + lastValidOff int64 + crc hash.Hash32 +} + +func newDecoder(r ...io.Reader) *decoder { + readers := make([]*bufio.Reader, len(r)) + for i := range r { + readers[i] = bufio.NewReader(r[i]) + } + return &decoder{ + brs: readers, + crc: crc.New(0, crcTable), + } +} + +func (d *decoder) decode(rec *walpb.Record) error { + rec.Reset() + d.mu.Lock() + defer d.mu.Unlock() + return d.decodeRecord(rec) +} + +func (d *decoder) decodeRecord(rec *walpb.Record) error { + if len(d.brs) == 0 { + return io.EOF + } + + l, err := readInt64(d.brs[0]) + if err == io.EOF || (err == nil && l == 0) { + // hit end of file or preallocated space + d.brs = d.brs[1:] + if len(d.brs) == 0 { + return io.EOF + } + d.lastValidOff = 0 + return d.decodeRecord(rec) + } + if err != nil { + return err + } + + recBytes, padBytes := decodeFrameSize(l) + + data := make([]byte, recBytes+padBytes) + if _, err = io.ReadFull(d.brs[0], data); err != nil { + // ReadFull returns io.EOF only if no bytes were read + // the decoder should treat this as an ErrUnexpectedEOF instead. + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if err := rec.Unmarshal(data[:recBytes]); err != nil { + if d.isTornEntry(data) { + return io.ErrUnexpectedEOF + } + return err + } + + // skip crc checking if the record type is crcType + if rec.Type != crcType { + d.crc.Write(rec.Data) + if err := rec.Validate(d.crc.Sum32()); err != nil { + if d.isTornEntry(data) { + return io.ErrUnexpectedEOF + } + return err + } + } + // record decoded as valid; point last valid offset to end of record + d.lastValidOff += recBytes + padBytes + 8 + return nil +} + +func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) { + // the record size is stored in the lower 56 bits of the 64-bit length + recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56)) + // non-zero padding is indicated by set MSb / a negative length + if lenField < 0 { + // padding is stored in lower 3 bits of length MSB + padBytes = int64((uint64(lenField) >> 56) & 0x7) + } + return +} + +// isTornEntry determines whether the last entry of the WAL was partially written +// and corrupted because of a torn write. 
+func (d *decoder) isTornEntry(data []byte) bool { + if len(d.brs) != 1 { + return false + } + + fileOff := d.lastValidOff + 8 + curOff := 0 + chunks := [][]byte{} + // split data on sector boundaries + for curOff < len(data) { + chunkLen := int(minSectorSize - (fileOff % minSectorSize)) + if chunkLen > len(data)-curOff { + chunkLen = len(data) - curOff + } + chunks = append(chunks, data[curOff:curOff+chunkLen]) + fileOff += int64(chunkLen) + curOff += chunkLen + } + + // if any data for a sector chunk is all 0, it's a torn write + for _, sect := range chunks { + isZero := true + for _, v := range sect { + if v != 0 { + isZero = false + break + } + } + if isZero { + return true + } + } + return false +} + +func (d *decoder) updateCRC(prevCrc uint32) { + d.crc = crc.New(prevCrc, crcTable) +} + +func (d *decoder) lastCRC() uint32 { + return d.crc.Sum32() +} + +func (d *decoder) lastOffset() int64 { return d.lastValidOff } + +func mustUnmarshalEntry(d []byte) raftpb.Entry { + var e raftpb.Entry + pbutil.MustUnmarshal(&e, d) + return e +} + +func mustUnmarshalState(d []byte) raftpb.HardState { + var s raftpb.HardState + pbutil.MustUnmarshal(&s, d) + return s +} + +func readInt64(r io.Reader) (int64, error) { + var n int64 + err := binary.Read(r, binary.LittleEndian, &n) + return n, err +} diff --git a/vendor/github.com/coreos/etcd/wal/doc.go b/vendor/github.com/coreos/etcd/wal/doc.go new file mode 100644 index 000000000000..a3abd69613d4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/doc.go @@ -0,0 +1,75 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package wal provides an implementation of a write ahead log that is used by +etcd. + +A WAL is created at a particular directory and is made up of a number of +segmented WAL files. Inside of each file the raft state and entries are appended +to it with the Save method: + + metadata := []byte{} + w, err := wal.Create("/var/lib/etcd", metadata) + ... + err := w.Save(s, ents) + +After saving a raft snapshot to disk, SaveSnapshot method should be called to +record it. So WAL can match with the saved snapshot when restarting. + + err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2}) + +When a user has finished using a WAL it must be closed: + + w.Close() + +Each WAL file is a stream of WAL records. A WAL record is a length field and a wal record +protobuf. The record protobuf contains a CRC, a type, and a data payload. The length field is a +64-bit packed structure holding the length of the remaining logical record data in its lower +56 bits and its physical padding in the first three bits of the most significant byte. Each +record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32 +value of all record protobufs preceding the current record. 
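+
+As a worked example (sizes invented for illustration): a 13-byte marshaled
+record is padded with 3 bytes to reach 8-byte alignment, so its length field
+packs both values:
+
+	lenField := uint64(13) | uint64(0x80|3)<<56 // 0x830000000000000d
+	recBytes := int64(lenField &^ (0xff << 56)) // 13
+	padBytes := int64((lenField >> 56) & 0x7)   // 3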
+ +WAL files are placed inside of the directory in the following format: +$seq-$index.wal + +The first WAL file to be created will be 0000000000000000-0000000000000000.wal +indicating an initial sequence of 0 and an initial raft index of 0. The first +entry written to WAL MUST have raft index 0. + +WAL will cut its current tail wal file if its size exceeds 64MB. This will increment an internal +sequence number and cause a new file to be created. If the last raft index saved +was 0x20 and this is the first time cut has been called on this WAL then the sequence will +increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal. +If a second cut issues 0x10 entries with incremental index later then the file will be called: +0000000000000002-0000000000000031.wal. + +At a later time a WAL can be opened at a particular snapshot. If there is no +snapshot, an empty snapshot should be passed in. + + w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2}) + ... + +The snapshot must have been written to the WAL. + +Additional items cannot be Saved to this WAL until all of the items from the given +snapshot to the end of the WAL are read first: + + metadata, state, ents, err := w.ReadAll() + +This will give you the metadata, the last raft.State and the slice of +raft.Entry items in the log. + +*/ +package wal diff --git a/vendor/github.com/coreos/etcd/wal/encoder.go b/vendor/github.com/coreos/etcd/wal/encoder.go new file mode 100644 index 000000000000..aac1e197e590 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/encoder.go @@ -0,0 +1,120 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "encoding/binary" + "hash" + "io" + "os" + "sync" + + "github.com/coreos/etcd/pkg/crc" + "github.com/coreos/etcd/pkg/ioutil" + "github.com/coreos/etcd/wal/walpb" +) + +// walPageBytes is the alignment for flushing records to the backing Writer. +// It should be a multiple of the minimum sector size so that WAL can safely +// distinguish between torn writes and ordinary data corruption. +const walPageBytes = 8 * minSectorSize + +type encoder struct { + mu sync.Mutex + bw *ioutil.PageWriter + + crc hash.Hash32 + buf []byte + uint64buf []byte +} + +func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder { + return &encoder{ + bw: ioutil.NewPageWriter(w, walPageBytes, pageOffset), + crc: crc.New(prevCrc, crcTable), + // 1MB buffer + buf: make([]byte, 1024*1024), + uint64buf: make([]byte, 8), + } +} + +// newFileEncoder creates a new encoder with current file offset for the page writer. 
+func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) { + offset, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return nil, err + } + return newEncoder(f, prevCrc, int(offset)), nil +} + +func (e *encoder) encode(rec *walpb.Record) error { + e.mu.Lock() + defer e.mu.Unlock() + + e.crc.Write(rec.Data) + rec.Crc = e.crc.Sum32() + var ( + data []byte + err error + n int + ) + + if rec.Size() > len(e.buf) { + data, err = rec.Marshal() + if err != nil { + return err + } + } else { + n, err = rec.MarshalTo(e.buf) + if err != nil { + return err + } + data = e.buf[:n] + } + + lenField, padBytes := encodeFrameSize(len(data)) + if err = writeUint64(e.bw, lenField, e.uint64buf); err != nil { + return err + } + + if padBytes != 0 { + data = append(data, make([]byte, padBytes)...) + } + _, err = e.bw.Write(data) + return err +} + +func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) { + lenField = uint64(dataBytes) + // force 8 byte alignment so length never gets a torn write + padBytes = (8 - (dataBytes % 8)) % 8 + if padBytes != 0 { + lenField |= uint64(0x80|padBytes) << 56 + } + return +} + +func (e *encoder) flush() error { + e.mu.Lock() + defer e.mu.Unlock() + return e.bw.Flush() +} + +func writeUint64(w io.Writer, n uint64, buf []byte) error { + // http://golang.org/src/encoding/binary/binary.go + binary.LittleEndian.PutUint64(buf, n) + _, err := w.Write(buf) + return err +} diff --git a/vendor/github.com/coreos/etcd/wal/file_pipeline.go b/vendor/github.com/coreos/etcd/wal/file_pipeline.go new file mode 100644 index 000000000000..5e32a0693c14 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/file_pipeline.go @@ -0,0 +1,97 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/coreos/etcd/pkg/fileutil" +) + +// filePipeline pipelines allocating disk space +type filePipeline struct { + // dir to put files + dir string + // size of files to make, in bytes + size int64 + // count number of files generated + count int + + filec chan *fileutil.LockedFile + errc chan error + donec chan struct{} +} + +func newFilePipeline(dir string, fileSize int64) *filePipeline { + fp := &filePipeline{ + dir: dir, + size: fileSize, + filec: make(chan *fileutil.LockedFile), + errc: make(chan error, 1), + donec: make(chan struct{}), + } + go fp.run() + return fp +} + +// Open returns a fresh file for writing. Rename the file before calling +// Open again or there will be file collisions. 
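+//
+// A usage sketch (path and 64 MB segment size invented to match the WAL's
+// preallocation behavior):
+//
+//	fp := newFilePipeline("/var/lib/etcd/member/wal", 64*1024*1024)
+//	defer fp.Close()
+//	f, err := fp.Open() // a pre-allocated, locked *.tmp file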
+func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) { + select { + case f = <-fp.filec: + case err = <-fp.errc: + } + return +} + +func (fp *filePipeline) Close() error { + close(fp.donec) + return <-fp.errc +} + +func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) { + // count % 2 so this file isn't the same as the one last published + fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2)) + if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil { + return nil, err + } + if err = fileutil.Preallocate(f.File, fp.size, true); err != nil { + plog.Errorf("failed to allocate space when creating new wal file (%v)", err) + f.Close() + return nil, err + } + fp.count++ + return f, nil +} + +func (fp *filePipeline) run() { + defer close(fp.errc) + for { + f, err := fp.alloc() + if err != nil { + fp.errc <- err + return + } + select { + case fp.filec <- f: + case <-fp.donec: + os.Remove(f.Name()) + f.Close() + return + } + } +} diff --git a/vendor/github.com/coreos/etcd/wal/metrics.go b/vendor/github.com/coreos/etcd/wal/metrics.go new file mode 100644 index 000000000000..9e089d380f9b --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/metrics.go @@ -0,0 +1,31 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import "github.com/prometheus/client_golang/prometheus" + +var ( + syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "disk", + Name: "wal_fsync_duration_seconds", + Help: "The latency distributions of fsync called by wal.", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), + }) +) + +func init() { + prometheus.MustRegister(syncDurations) +} diff --git a/vendor/github.com/coreos/etcd/wal/repair.go b/vendor/github.com/coreos/etcd/wal/repair.go new file mode 100644 index 000000000000..091036b57b9a --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/repair.go @@ -0,0 +1,99 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "io" + "os" + "path/filepath" + + "github.com/coreos/etcd/pkg/fileutil" + "github.com/coreos/etcd/wal/walpb" +) + +// Repair tries to repair ErrUnexpectedEOF in the +// last wal file by truncating. 
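+//
+// A hedged usage sketch (the retry pattern is an assumption, not part of
+// this package): a caller that fails reading its WAL with
+// io.ErrUnexpectedEOF may attempt a repair and then reopen:
+//
+//	if !wal.Repair(dirpath) {
+//		plog.Fatalf("cannot repair wal %v", dirpath)
+//	}
+//	w, err := wal.Open(dirpath, snap)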
+func Repair(dirpath string) bool {
+	f, err := openLast(dirpath)
+	if err != nil {
+		return false
+	}
+	defer f.Close()
+
+	rec := &walpb.Record{}
+	decoder := newDecoder(f)
+	for {
+		lastOffset := decoder.lastOffset()
+		err := decoder.decode(rec)
+		switch err {
+		case nil:
+			// update crc of the decoder when necessary
+			switch rec.Type {
+			case crcType:
+				crc := decoder.crc.Sum32()
+				// the decoder's current crc must match the crc of the record;
+				// no need to match a zero crc, since the decoder is new in that case.
+				if crc != 0 && rec.Validate(crc) != nil {
+					return false
+				}
+				decoder.updateCRC(rec.Crc)
+			}
+			continue
+		case io.EOF:
+			return true
+		case io.ErrUnexpectedEOF:
+			plog.Noticef("repairing %v", f.Name())
+			bf, bferr := os.Create(f.Name() + ".broken")
+			if bferr != nil {
+				plog.Errorf("could not repair %v, failed to create backup file", f.Name())
+				return false
+			}
+			defer bf.Close()
+
+			if _, err = f.Seek(0, io.SeekStart); err != nil {
+				plog.Errorf("could not repair %v, failed to read file", f.Name())
+				return false
+			}
+
+			if _, err = io.Copy(bf, f); err != nil {
+				plog.Errorf("could not repair %v, failed to copy file", f.Name())
+				return false
+			}
+
+			if err = f.Truncate(int64(lastOffset)); err != nil {
+				plog.Errorf("could not repair %v, failed to truncate file", f.Name())
+				return false
+			}
+			if err = fileutil.Fsync(f.File); err != nil {
+				plog.Errorf("could not repair %v, failed to sync file", f.Name())
+				return false
+			}
+			return true
+		default:
+			plog.Errorf("could not repair error (%v)", err)
+			return false
+		}
+	}
+}
+
+// openLast opens the last wal file for read and write.
+func openLast(dirpath string) (*fileutil.LockedFile, error) {
+	names, err := readWalNames(dirpath)
+	if err != nil {
+		return nil, err
+	}
+	last := filepath.Join(dirpath, names[len(names)-1])
+	return fileutil.LockFile(last, os.O_RDWR, fileutil.PrivateFileMode)
+}
diff --git a/vendor/github.com/coreos/etcd/wal/util.go b/vendor/github.com/coreos/etcd/wal/util.go
new file mode 100644
index 000000000000..5c56e228872d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/wal/util.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+)
+
+var (
+	badWalName = errors.New("bad wal name")
+)
+
+func Exist(dirpath string) bool {
+	names, err := fileutil.ReadDir(dirpath)
+	if err != nil {
+		return false
+	}
+	return len(names) != 0
+}
+
+// searchIndex returns the last array index of names whose raft index section is
+// equal to or smaller than the given index.
+// The given names MUST be sorted.
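+//
+// For example (a hedged illustration), given the sorted names
+//
+//	0000000000000000-0000000000000000.wal
+//	0000000000000001-0000000000000010.wal
+//	0000000000000002-0000000000000020.wal
+//
+// searchIndex(names, 0x15) returns (1, true): raft index 0x15 falls in the
+// file whose first index is 0x10.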
+func searchIndex(names []string, index uint64) (int, bool) { + for i := len(names) - 1; i >= 0; i-- { + name := names[i] + _, curIndex, err := parseWalName(name) + if err != nil { + plog.Panicf("parse correct name should never fail: %v", err) + } + if index >= curIndex { + return i, true + } + } + return -1, false +} + +// names should have been sorted based on sequence number. +// isValidSeq checks whether seq increases continuously. +func isValidSeq(names []string) bool { + var lastSeq uint64 + for _, name := range names { + curSeq, _, err := parseWalName(name) + if err != nil { + plog.Panicf("parse correct name should never fail: %v", err) + } + if lastSeq != 0 && lastSeq != curSeq-1 { + return false + } + lastSeq = curSeq + } + return true +} +func readWalNames(dirpath string) ([]string, error) { + names, err := fileutil.ReadDir(dirpath) + if err != nil { + return nil, err + } + wnames := checkWalNames(names) + if len(wnames) == 0 { + return nil, ErrFileNotFound + } + return wnames, nil +} + +func checkWalNames(names []string) []string { + wnames := make([]string, 0) + for _, name := range names { + if _, _, err := parseWalName(name); err != nil { + // don't complain about left over tmp files + if !strings.HasSuffix(name, ".tmp") { + plog.Warningf("ignored file %v in wal", name) + } + continue + } + wnames = append(wnames, name) + } + return wnames +} + +func parseWalName(str string) (seq, index uint64, err error) { + if !strings.HasSuffix(str, ".wal") { + return 0, 0, badWalName + } + _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index) + return seq, index, err +} + +func walName(seq, index uint64) string { + return fmt.Sprintf("%016x-%016x.wal", seq, index) +} diff --git a/vendor/github.com/coreos/etcd/wal/wal.go b/vendor/github.com/coreos/etcd/wal/wal.go new file mode 100644 index 000000000000..2cac25c1c904 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/wal.go @@ -0,0 +1,628 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "bytes" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/coreos/etcd/pkg/fileutil" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/wal/walpb" + + "github.com/coreos/pkg/capnslog" +) + +const ( + metadataType int64 = iota + 1 + entryType + stateType + crcType + snapshotType + + // warnSyncDuration is the amount of time allotted to an fsync before + // logging a warning + warnSyncDuration = time.Second +) + +var ( + // SegmentSizeBytes is the preallocated size of each wal segment file. + // The actual size might be larger than this. In general, the default + // value should be used, but this is defined as an exported variable + // so that tests can set a different segment size. 
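+	//
+	// A hedged sketch of such a test override (the test code itself is an
+	// assumption, not part of this package):
+	//
+	//	defer func(old int64) { wal.SegmentSizeBytes = old }(wal.SegmentSizeBytes)
+	//	wal.SegmentSizeBytes = 4 * 1024 // force frequent cuts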
+	SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
+
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "wal")
+
+	ErrMetadataConflict = errors.New("wal: conflicting metadata found")
+	ErrFileNotFound     = errors.New("wal: file not found")
+	ErrCRCMismatch      = errors.New("wal: crc mismatch")
+	ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
+	ErrSnapshotNotFound = errors.New("wal: snapshot not found")
+	crcTable            = crc32.MakeTable(crc32.Castagnoli)
+)
+
+// WAL is a logical representation of the stable storage.
+// WAL is either in read mode or append mode but not both.
+// A newly created WAL is in append mode, and ready for appending records.
+// A just opened WAL is in read mode, and ready for reading records.
+// The WAL will be ready for appending after reading out all the previous records.
+type WAL struct {
+	dir string // the living directory of the underlying files
+
+	// dirFile is a fd for the wal directory for syncing on Rename
+	dirFile *os.File
+
+	metadata []byte           // metadata recorded at the head of each WAL
+	state    raftpb.HardState // hardstate recorded at the head of WAL
+
+	start     walpb.Snapshot // snapshot to start reading
+	decoder   *decoder       // decoder to decode records
+	readClose func() error   // closer for decode reader
+
+	mu      sync.Mutex
+	enti    uint64   // index of the last entry saved to the wal
+	encoder *encoder // encoder to encode records
+
+	locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
+	fp    *filePipeline
+}
+
+// Create creates a WAL ready for appending records. The given metadata is
+// recorded at the head of each WAL file, and can be retrieved with ReadAll.
+func Create(dirpath string, metadata []byte) (*WAL, error) {
+	if Exist(dirpath) {
+		return nil, os.ErrExist
+	}
+
+	// keep temporary wal directory so WAL initialization appears atomic
+	tmpdirpath := filepath.Clean(dirpath) + ".tmp"
+	if fileutil.Exist(tmpdirpath) {
+		if err := os.RemoveAll(tmpdirpath); err != nil {
+			return nil, err
+		}
+	}
+	if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
+		return nil, err
+	}
+
+	p := filepath.Join(tmpdirpath, walName(0, 0))
+	f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
+	if err != nil {
+		return nil, err
+	}
+	if _, err = f.Seek(0, io.SeekEnd); err != nil {
+		return nil, err
+	}
+	if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
+		return nil, err
+	}
+
+	w := &WAL{
+		dir:      dirpath,
+		metadata: metadata,
+	}
+	w.encoder, err = newFileEncoder(f.File, 0)
+	if err != nil {
+		return nil, err
+	}
+	w.locks = append(w.locks, f)
+	if err = w.saveCrc(0); err != nil {
+		return nil, err
+	}
+	if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
+		return nil, err
+	}
+	if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+		return nil, err
+	}
+
+	if w, err = w.renameWal(tmpdirpath); err != nil {
+		return nil, err
+	}
+
+	// directory was renamed; sync parent dir to persist rename
+	pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir))
+	if perr != nil {
+		return nil, perr
+	}
+	if perr = fileutil.Fsync(pdir); perr != nil {
+		return nil, perr
+	}
+	if perr = pdir.Close(); perr != nil {
+		return nil, perr
+	}
+
+	return w, nil
+}
+
+// Open opens the WAL at the given snap.
+// The snap SHOULD have been previously saved to the WAL, or the following
+// ReadAll will fail.
+// The returned WAL is ready to read and the first record will be the one after
+// the given snap. The WAL cannot be appended to before reading out all of its
+// previous records.
+func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
+	w, err := openAtIndex(dirpath, snap, true)
+	if err != nil {
+		return nil, err
+	}
+	if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// OpenForRead only opens the wal files for read.
+// Writing to a read-only wal panics.
+func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) {
+	return openAtIndex(dirpath, snap, false)
+}
+
+func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
+	names, err := readWalNames(dirpath)
+	if err != nil {
+		return nil, err
+	}
+
+	nameIndex, ok := searchIndex(names, snap.Index)
+	if !ok || !isValidSeq(names[nameIndex:]) {
+		return nil, ErrFileNotFound
+	}
+
+	// open the wal files
+	rcs := make([]io.ReadCloser, 0)
+	rs := make([]io.Reader, 0)
+	ls := make([]*fileutil.LockedFile, 0)
+	for _, name := range names[nameIndex:] {
+		p := filepath.Join(dirpath, name)
+		if write {
+			l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
+			if err != nil {
+				closeAll(rcs...)
+				return nil, err
+			}
+			ls = append(ls, l)
+			rcs = append(rcs, l)
+		} else {
+			rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode)
+			if err != nil {
+				closeAll(rcs...)
+				return nil, err
+			}
+			ls = append(ls, nil)
+			rcs = append(rcs, rf)
+		}
+		rs = append(rs, rcs[len(rcs)-1])
+	}
+
+	closer := func() error { return closeAll(rcs...) }
+
+	// create a WAL ready for reading
+	w := &WAL{
+		dir:       dirpath,
+		start:     snap,
+		decoder:   newDecoder(rs...),
+		readClose: closer,
+		locks:     ls,
+	}
+
+	if write {
+		// write reuses the file descriptors from read; don't close so
+		// WAL can append without dropping the file lock
+		w.readClose = nil
+		if _, _, err := parseWalName(filepath.Base(w.tail().Name())); err != nil {
+			closer()
+			return nil, err
+		}
+		w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
+	}
+
+	return w, nil
+}
+
+// ReadAll reads out records of the current WAL.
+// If opened in write mode, it must read out all records until EOF;
+// otherwise an error will be returned.
+// If opened in read mode, it will try to read all records if possible.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match the expected one, it will return
+// all the records and the error ErrSnapshotMismatch.
+// TODO: detect not-last-snap error.
+// TODO: maybe loosen the matching check.
+// After ReadAll, the WAL will be ready for appending new records.
+func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	rec := &walpb.Record{}
+	decoder := w.decoder
+
+	var match bool
+	for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+		switch rec.Type {
+		case entryType:
+			e := mustUnmarshalEntry(rec.Data)
+			if e.Index > w.start.Index {
+				ents = append(ents[:e.Index-w.start.Index-1], e)
+			}
+			w.enti = e.Index
+		case stateType:
+			state = mustUnmarshalState(rec.Data)
+		case metadataType:
+			if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+				state.Reset()
+				return nil, state, nil, ErrMetadataConflict
+			}
+			metadata = rec.Data
+		case crcType:
+			crc := decoder.crc.Sum32()
+			// the decoder's current crc must match the crc of the record;
+			// no need to match a zero crc, since the decoder is new in that case.
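+			// (A hedged note: each crcType record carries the running CRC of
+			// all record data written before it, so a mismatch here points at
+			// torn or corrupted data earlier in the log.)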
+			if crc != 0 && rec.Validate(crc) != nil {
+				state.Reset()
+				return nil, state, nil, ErrCRCMismatch
+			}
+			decoder.updateCRC(rec.Crc)
+		case snapshotType:
+			var snap walpb.Snapshot
+			pbutil.MustUnmarshal(&snap, rec.Data)
+			if snap.Index == w.start.Index {
+				if snap.Term != w.start.Term {
+					state.Reset()
+					return nil, state, nil, ErrSnapshotMismatch
+				}
+				match = true
+			}
+		default:
+			state.Reset()
+			return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
+		}
+	}
+
+	switch w.tail() {
+	case nil:
+		// We do not have to read out all entries in read mode.
+		// The last record may be a partially written one, so
+		// io.ErrUnexpectedEOF might be returned.
+		if err != io.EOF && err != io.ErrUnexpectedEOF {
+			state.Reset()
+			return nil, state, nil, err
+		}
+	default:
+		// We must read all of the entries if WAL is opened in write mode.
+		if err != io.EOF {
+			state.Reset()
+			return nil, state, nil, err
+		}
+		// decodeRecord() will return io.EOF if it detects a zero record,
+		// but this zero record may be followed by non-zero records from
+		// a torn write. Overwriting some of these non-zero records, but
+		// not all, will cause CRC errors on WAL open. Since the records
+		// were never fully synced to disk in the first place, it's safe
+		// to zero them out to avoid any CRC errors from new writes.
+		if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil {
+			return nil, state, nil, err
+		}
+		if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
+			return nil, state, nil, err
+		}
+	}
+
+	err = nil
+	if !match {
+		err = ErrSnapshotNotFound
+	}
+
+	// close decoder, disable reading
+	if w.readClose != nil {
+		w.readClose()
+		w.readClose = nil
+	}
+	w.start = walpb.Snapshot{}
+
+	w.metadata = metadata
+
+	if w.tail() != nil {
+		// create encoder (chain crc with the decoder), enable appending
+		w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
+		if err != nil {
+			return
+		}
+	}
+	w.decoder = nil
+
+	return metadata, state, ents, err
+}
+
+// cut closes the current file being written and creates a new one ready for appending.
+// cut first creates a temp wal file and writes the necessary headers into it.
+// Then cut atomically renames the temp wal file to a wal file.
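+//
+// For example (a hedged illustration consistent with the package doc): if
+// the tail is 0000000000000000-0000000000000000.wal and the last saved raft
+// index is 0x20, cut produces 0000000000000001-0000000000000021.wal and
+// subsequent appends land in that file.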
+func (w *WAL) cut() error {
+	// close the old wal file; truncate to avoid wasting space on an early cut
+	off, serr := w.tail().Seek(0, io.SeekCurrent)
+	if serr != nil {
+		return serr
+	}
+	if err := w.tail().Truncate(off); err != nil {
+		return err
+	}
+	if err := w.sync(); err != nil {
+		return err
+	}
+
+	fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
+
+	// create a temp wal file with name sequence + 1, or truncate the existing one
+	newTail, err := w.fp.Open()
+	if err != nil {
+		return err
+	}
+
+	// update writer and save the previous crc
+	w.locks = append(w.locks, newTail)
+	prevCrc := w.encoder.crc.Sum32()
+	w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+	if err != nil {
+		return err
+	}
+	if err = w.saveCrc(prevCrc); err != nil {
+		return err
+	}
+	if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
+		return err
+	}
+	if err = w.saveState(&w.state); err != nil {
+		return err
+	}
+	// atomically move temp wal file to wal file
+	if err = w.sync(); err != nil {
+		return err
+	}
+
+	off, err = w.tail().Seek(0, io.SeekCurrent)
+	if err != nil {
+		return err
+	}
+
+	if err = os.Rename(newTail.Name(), fpath); err != nil {
+		return err
+	}
+	if err = fileutil.Fsync(w.dirFile); err != nil {
+		return err
+	}
+
+	newTail.Close()
+
+	if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
+		return err
+	}
+	if _, err = newTail.Seek(off, io.SeekStart); err != nil {
+		return err
+	}
+
+	w.locks[len(w.locks)-1] = newTail
+
+	prevCrc = w.encoder.crc.Sum32()
+	w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+	if err != nil {
+		return err
+	}
+
+	plog.Infof("segmented wal file %v is created", fpath)
+	return nil
+}
+
+func (w *WAL) sync() error {
+	if w.encoder != nil {
+		if err := w.encoder.flush(); err != nil {
+			return err
+		}
+	}
+	start := time.Now()
+	err := fileutil.Fdatasync(w.tail().File)
+
+	duration := time.Since(start)
+	if duration > warnSyncDuration {
+		plog.Warningf("sync duration of %v, expected less than %v", duration, warnSyncDuration)
+	}
+	syncDurations.Observe(duration.Seconds())
+
+	return err
+}
+
+// ReleaseLockTo releases the locks whose index is smaller than the given index,
+// except the largest one among them.
+// For example, if WAL is holding lock 1,2,3,4,5,6, ReleaseLockTo(4) will release
+// lock 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
+func (w *WAL) ReleaseLockTo(index uint64) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	var smaller int
+	found := false
+
+	for i, l := range w.locks {
+		_, lockIndex, err := parseWalName(filepath.Base(l.Name()))
+		if err != nil {
+			return err
+		}
+		if lockIndex >= index {
+			smaller = i - 1
+			found = true
+			break
+		}
+	}
+
+	// if no lock index is greater than or equal to the release index, we can
+	// release the locks up to the last one (exclusive).
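+	// (For example, with locks 1,2,3,4,5,6, ReleaseLockTo(10) releases
+	// 1 through 5 and keeps only 6.)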
+ if !found && len(w.locks) != 0 { + smaller = len(w.locks) - 1 + } + + if smaller <= 0 { + return nil + } + + for i := 0; i < smaller; i++ { + if w.locks[i] == nil { + continue + } + w.locks[i].Close() + } + w.locks = w.locks[smaller:] + + return nil +} + +func (w *WAL) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.fp != nil { + w.fp.Close() + w.fp = nil + } + + if w.tail() != nil { + if err := w.sync(); err != nil { + return err + } + } + for _, l := range w.locks { + if l == nil { + continue + } + if err := l.Close(); err != nil { + plog.Errorf("failed to unlock during closing wal: %s", err) + } + } + + return w.dirFile.Close() +} + +func (w *WAL) saveEntry(e *raftpb.Entry) error { + // TODO: add MustMarshalTo to reduce one allocation. + b := pbutil.MustMarshal(e) + rec := &walpb.Record{Type: entryType, Data: b} + if err := w.encoder.encode(rec); err != nil { + return err + } + w.enti = e.Index + return nil +} + +func (w *WAL) saveState(s *raftpb.HardState) error { + if raft.IsEmptyHardState(*s) { + return nil + } + w.state = *s + b := pbutil.MustMarshal(s) + rec := &walpb.Record{Type: stateType, Data: b} + return w.encoder.encode(rec) +} + +func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { + w.mu.Lock() + defer w.mu.Unlock() + + // short cut, do not call sync + if raft.IsEmptyHardState(st) && len(ents) == 0 { + return nil + } + + mustSync := raft.MustSync(st, w.state, len(ents)) + + // TODO(xiangli): no more reference operator + for i := range ents { + if err := w.saveEntry(&ents[i]); err != nil { + return err + } + } + if err := w.saveState(&st); err != nil { + return err + } + + curOff, err := w.tail().Seek(0, io.SeekCurrent) + if err != nil { + return err + } + if curOff < SegmentSizeBytes { + if mustSync { + return w.sync() + } + return nil + } + + return w.cut() +} + +func (w *WAL) SaveSnapshot(e walpb.Snapshot) error { + b := pbutil.MustMarshal(&e) + + w.mu.Lock() + defer w.mu.Unlock() + + rec := &walpb.Record{Type: snapshotType, Data: b} + if err := w.encoder.encode(rec); err != nil { + return err + } + // update enti only when snapshot is ahead of last index + if w.enti < e.Index { + w.enti = e.Index + } + return w.sync() +} + +func (w *WAL) saveCrc(prevCrc uint32) error { + return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc}) +} + +func (w *WAL) tail() *fileutil.LockedFile { + if len(w.locks) > 0 { + return w.locks[len(w.locks)-1] + } + return nil +} + +func (w *WAL) seq() uint64 { + t := w.tail() + if t == nil { + return 0 + } + seq, _, err := parseWalName(filepath.Base(t.Name())) + if err != nil { + plog.Fatalf("bad wal name %s (%v)", t.Name(), err) + } + return seq +} + +func closeAll(rcs ...io.ReadCloser) error { + for _, f := range rcs { + if err := f.Close(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/wal/wal_unix.go b/vendor/github.com/coreos/etcd/wal/wal_unix.go new file mode 100644 index 000000000000..82fd6a17a741 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/wal_unix.go @@ -0,0 +1,44 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package wal + +import ( + "os" + + "github.com/coreos/etcd/pkg/fileutil" +) + +func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) { + // On non-Windows platforms, hold the lock while renaming. Releasing + // the lock and trying to reacquire it quickly can be flaky because + // it's possible the process will fork to spawn a process while this is + // happening. The fds are set up as close-on-exec by the Go runtime, + // but there is a window between the fork and the exec where another + // process holds the lock. + + if err := os.RemoveAll(w.dir); err != nil { + return nil, err + } + if err := os.Rename(tmpdirpath, w.dir); err != nil { + return nil, err + } + + w.fp = newFilePipeline(w.dir, SegmentSizeBytes) + df, err := fileutil.OpenDir(w.dir) + w.dirFile = df + return w, err +} diff --git a/vendor/github.com/coreos/etcd/wal/wal_windows.go b/vendor/github.com/coreos/etcd/wal/wal_windows.go new file mode 100644 index 000000000000..0b9e434cf546 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/wal_windows.go @@ -0,0 +1,41 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package wal + +import ( + "os" + + "github.com/coreos/etcd/wal/walpb" +) + +func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) { + // rename of directory with locked files doesn't work on + // windows; close the WAL to release the locks so the directory + // can be renamed + w.Close() + if err := os.Rename(tmpdirpath, w.dir); err != nil { + return nil, err + } + // reopen and relock + newWAL, oerr := Open(w.dir, walpb.Snapshot{}) + if oerr != nil { + return nil, oerr + } + if _, _, _, err := newWAL.ReadAll(); err != nil { + newWAL.Close() + return nil, err + } + return newWAL, nil +} diff --git a/vendor/github.com/coreos/etcd/wal/walpb/BUILD b/vendor/github.com/coreos/etcd/wal/walpb/BUILD new file mode 100644 index 000000000000..8dafb2eb1805 --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/walpb/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "record.go", + "record.pb.go", + ], + importpath = "github.com/coreos/etcd/wal/walpb", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.go b/vendor/github.com/coreos/etcd/wal/walpb/record.go new file mode 100644 index 000000000000..30a05e0c139c --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/walpb/record.go @@ -0,0 +1,29 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package walpb + +import "errors" + +var ( + ErrCRCMismatch = errors.New("walpb: crc mismatch") +) + +func (rec *Record) Validate(crc uint32) error { + if rec.Crc == crc { + return nil + } + rec.Reset() + return ErrCRCMismatch +} diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go new file mode 100644 index 000000000000..664fae1305bf --- /dev/null +++ b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go @@ -0,0 +1,521 @@ +// Code generated by protoc-gen-gogo. +// source: record.proto +// DO NOT EDIT! + +/* + Package walpb is a generated protocol buffer package. + + It is generated from these files: + record.proto + + It has these top-level messages: + Record + Snapshot +*/ +package walpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Record struct { + Type int64 `protobuf:"varint,1,opt,name=type" json:"type"` + Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Record) Reset() { *m = Record{} } +func (m *Record) String() string { return proto.CompactTextString(m) } +func (*Record) ProtoMessage() {} +func (*Record) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{0} } + +type Snapshot struct { + Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{1} } + +func init() { + proto.RegisterType((*Record)(nil), "walpb.Record") + proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot") +} +func (m *Record) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Record) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Crc)) + if m.Data != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRecord(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Record(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Record(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintRecord(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Record) Size() (n int) { + var l int + _ = l + n += 1 + sovRecord(uint64(m.Type)) + n += 1 + sovRecord(uint64(m.Crc)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRecord(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Snapshot) Size() (n 
int) { + var l int + _ = l + n += 1 + sovRecord(uint64(m.Index)) + n += 1 + sovRecord(uint64(m.Term)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRecord(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRecord(x uint64) (n int) { + return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Record) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Record: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) + } + m.Crc = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Crc |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRecord + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRecord(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRecord + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRecord(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRecord + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRecord(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRecord + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRecord + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRecord(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) } + +var fileDescriptorRecord = []byte{ + // 186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, + 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92, + 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6, + 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, + 0x66, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xc0, 0x22, 0x42, 0x62, 0x5c, 0xcc, 0xc9, 0x45, + 0xc9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xbc, 0x50, 0x09, 0x90, 0x80, 0x90, 0x10, 0x17, 0x4b, 0x4a, + 0x62, 0x49, 0xa2, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, 0xad, 0xe4, 0xc0, 0xc5, 0x11, + 0x9c, 0x97, 0x58, 0x50, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc5, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a, + 0x01, 0x36, 0x92, 0x05, 0xaa, 0x13, 0x22, 0x04, 0xb6, 0x2d, 0xb5, 0x28, 0x17, 0x6c, 0x28, 0x0b, + 0xdc, 0xb6, 0xd4, 0xa2, 0x5c, 0x27, 0x91, 0x13, 0x0f, 0xe5, 0x18, 0x4e, 0x3c, 0x92, 0x63, 0xbc, + 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x7f, 0x5e, 0x5c, 0x46, 0xd3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.proto b/vendor/github.com/coreos/etcd/wal/walpb/record.proto new file mode 100644 index 000000000000..b694cb2338aa --- /dev/null +++ 
b/vendor/github.com/coreos/etcd/wal/walpb/record.proto @@ -0,0 +1,20 @@ +syntax = "proto2"; +package walpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message Record { + optional int64 type = 1 [(gogoproto.nullable) = false]; + optional uint32 crc = 2 [(gogoproto.nullable) = false]; + optional bytes data = 3; +} + +message Snapshot { + optional uint64 index = 1 [(gogoproto.nullable) = false]; + optional uint64 term = 2 [(gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-semver/semver/BUILD b/vendor/github.com/coreos/go-semver/semver/BUILD new file mode 100644 index 000000000000..b1dee52d8c72 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/BUILD @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "semver.go", + "sort.go", + ], + importpath = "github.com/coreos/go-semver/semver", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 000000000000..110fc23e1520 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,268 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + parsed := make([]int64, 3, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. 
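+//
+// For example:
+//
+//	New("1.2.3-rc.1").Slice() // []int64{1, 2, 3}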
+func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. + if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 000000000000..e256b41a5ddf --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
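For orientation, here is a minimal usage sketch (editorial, not part of the vendored files) of the semver API above: parsing, comparison, the Bump* helpers, and Sort from sort.go, whose body follows below. The import path matches the vendored package; the version strings are illustrative.

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// New panics on a malformed string (it wraps NewVersion in Must);
	// NewVersion returns an error for anything not in dotted-tri format.
	a := semver.New("1.2.3-alpha.1+build.42")
	b, err := semver.NewVersion("1.2.10")
	if err != nil {
		panic(err)
	}

	// Fields compare numerically (1.2.10 > 1.2.3), and a pre-release
	// sorts below the corresponding release.
	fmt.Println(a.LessThan(*b)) // true
	fmt.Println(a.Compare(*b))  // -1

	// Bump* increments one field and resets the lower-order ones.
	b.BumpMajor()
	fmt.Println(b) // 2.0.0

	// Sort orders a slice of *Version ascending.
	vs := []*semver.Version{b, a}
	semver.Sort(vs)
	fmt.Println(vs[0]) // 1.2.3-alpha.1+build.42
}
```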
+ +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 000000000000..37ec93a14fdc --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/journal/BUILD b/vendor/github.com/coreos/go-systemd/journal/BUILD new file mode 100644 index 000000000000..a2667c4e63c1 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/journal/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["journal.go"], + importpath = "github.com/coreos/go-systemd/journal", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go new file mode 100644 index 000000000000..7f434990d25b --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/journal/journal.go @@ -0,0 +1,179 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal a C API. 
+// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "syscall" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +var conn net.Conn + +func init() { + var err error + conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") + if err != nil { + conn = nil + } +} + +// Enabled returns true if the local systemd journal is available for logging +func Enabled() bool { + return conn != nil +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. +func Send(message string, priority Priority, vars map[string]string) error { + if conn == nil { + return journalError("could not connect to journald socket") + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, err := io.Copy(conn, data) + if err != nil && isSocketSpaceError(err) { + file, err := tempFd() + if err != nil { + return journalError(err.Error()) + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return journalError(err.Error()) + } + + rights := syscall.UnixRights(int(file.Fd())) + + /* this connection should always be a UnixConn, but better safe than sorry */ + unixConn, ok := conn.(*net.UnixConn) + if !ok { + return journalError("can't send file through non-Unix connection") + } + unixConn.WriteMsgUnix([]byte{}, rights, nil) + } else if err != nil { + return journalError(err.Error()) + } + return nil +} + +// Print prints a message to the local systemd journal using Send(). +func Print(priority Priority, format string, a ...interface{}) error { + return Send(fmt.Sprintf(format, a...), priority, nil) +} + +func appendVariable(w io.Writer, name, value string) { + if !validVarName(name) { + journalError("variable name contains invalid character, ignoring") + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +func validVarName(name string) bool { + /* The variable name must be in uppercase and consist only of characters, + * numbers and underscores, and may not begin with an underscore. 
(from the docs)
+	 */
+
+	valid := name[0] != '_'
+	for _, c := range name {
+		// Group the character test so a later digit or underscore cannot
+		// re-validate a name already marked invalid.
+		valid = valid && (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_')
+	}
+	return valid
+}
+
+func isSocketSpaceError(err error) bool {
+	opErr, ok := err.(*net.OpError)
+	if !ok {
+		return false
+	}
+
+	sysErr, ok := opErr.Err.(syscall.Errno)
+	if !ok {
+		return false
+	}
+
+	return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
+}
+
+func tempFd() (*os.File, error) {
+	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+	if err != nil {
+		return nil, err
+	}
+	// Capture the unlink error; the file stays usable through its open fd.
+	err = syscall.Unlink(file.Name())
+	if err != nil {
+		return nil, err
+	}
+	return file, nil
+}
+
+func journalError(s string) error {
+	s = "journal error: " + s
+	fmt.Fprintln(os.Stderr, s)
+	return errors.New(s)
+}
diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE
new file mode 100644
index 000000000000..e06d2081865a
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
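As a usage sketch for the journal package vendored above (editorial, assuming a Linux host with systemd): Send writes datagrams to /run/systemd/journal/socket, and appendVariable frames each field as NAME=value followed by a newline, or, when the value itself contains a newline, as NAME, a newline, a little-endian 64-bit length, and the raw value.

```go
package main

import "github.com/coreos/go-systemd/journal"

func main() {
	// Enabled reports whether the package-level connection to the
	// journald socket was established at init time.
	if !journal.Enabled() {
		return
	}

	// Field names must be upper-case letters, digits, or underscores,
	// and must not begin with an underscore.
	vars := map[string]string{"UNIT_CONTEXT": "demo"} // illustrative field
	if err := journal.Send("hello from Go", journal.PriInfo, vars); err != nil {
		panic(err)
	}

	// Print is a convenience wrapper around Send with printf formatting.
	journal.Print(journal.PriWarning, "disk usage at %d%%", 91)
}
```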
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE new file mode 100644 index 000000000000..b39ddfa5cbde --- /dev/null +++ b/vendor/github.com/coreos/pkg/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/pkg/capnslog/BUILD b/vendor/github.com/coreos/pkg/capnslog/BUILD new file mode 100644 index 000000000000..c00b0b02df48 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/BUILD @@ -0,0 +1,116 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "formatters.go", + "glog_formatter.go", + "log_hijack.go", + "logmap.go", + "pkg_logger.go", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "init.go", + "journald_formatter.go", + "syslog_formatter.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "init_windows.go", + ], + "//conditions:default": [], + }), + importpath = "github.com/coreos/pkg/capnslog", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + 
"@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/github.com/coreos/go-systemd/journal:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md new file mode 100644 index 000000000000..81efb1fb6a7b --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/README.md @@ -0,0 +1,39 @@ +# capnslog, the CoreOS logging package + +There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?). +capnslog provides a simple but consistent logging interface suitable for all kinds of projects. + +### Design Principles + +##### `package main` is the place where logging gets turned on and routed + +A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak. + +##### All log options are runtime-configurable. + +Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. + +##### There is one log object per package. It is registered under its repository and package name. + +`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs. + +##### There is *one* output stream, and it is an `io.Writer` composed with a formatter. + +Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer. + +Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependant. These are, at best, provided as options, but more likely, provided by your application. + +##### Log objects are an interface + +An object knows best how to print itself. 
Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Producing a string is the only guaranteed behavior.
+
+##### Log levels have specific meanings:
+
+ * Critical: Unrecoverable. Must fail.
+ * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost.
+ * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+ * Notice: Normal, but important (uncommon) log information.
+ * Info: Normal, working log information; everything is fine, but helpful notices for auditing or common operations.
+ * Debug: Everything is still fine, but even common operations may be logged, along with less helpful notices in greater quantity.
+ * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
+
diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go
new file mode 100644
index 000000000000..b305a845fb2b
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/formatters.go
@@ -0,0 +1,157 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"log"
+	"runtime"
+	"strings"
+	"time"
+)
+
+type Formatter interface {
+	Format(pkg string, level LogLevel, depth int, entries ...interface{})
+	Flush()
+}
+
+func NewStringFormatter(w io.Writer) Formatter {
+	return &StringFormatter{
+		w: bufio.NewWriter(w),
+	}
+}
+
+type StringFormatter struct {
+	w *bufio.Writer
+}
+
+func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
+	now := time.Now().UTC()
+	s.w.WriteString(now.Format(time.RFC3339))
+	s.w.WriteByte(' ')
+	writeEntries(s.w, pkg, l, i, entries...)
+	s.Flush()
+}
+
+func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
+	if pkg != "" {
+		w.WriteString(pkg + ": ")
+	}
+	str := fmt.Sprint(entries...)
+ endsInNL := strings.HasSuffix(str, "\n") + w.WriteString(str) + if !endsInNL { + w.WriteString("\n") + } +} + +func (s *StringFormatter) Flush() { + s.w.Flush() +} + +func NewPrettyFormatter(w io.Writer, debug bool) Formatter { + return &PrettyFormatter{ + w: bufio.NewWriter(w), + debug: debug, + } +} + +type PrettyFormatter struct { + w *bufio.Writer + debug bool +} + +func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { + now := time.Now() + ts := now.Format("2006-01-02 15:04:05") + c.w.WriteString(ts) + ms := now.Nanosecond() / 1000 + c.w.WriteString(fmt.Sprintf(".%06d", ms)) + if c.debug { + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) + } + c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) + writeEntries(c.w, pkg, l, depth, entries...) + c.Flush() +} + +func (c *PrettyFormatter) Flush() { + c.w.Flush() +} + +// LogFormatter emulates the form of the traditional built-in logger. +type LogFormatter struct { + logger *log.Logger + prefix string +} + +// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the +// golang log package to actually do the logging work so that logs look similar. +func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { + return &LogFormatter{ + logger: log.New(w, "", flag), // don't use prefix here + prefix: prefix, // save it instead + } +} + +// Format builds a log message for the LogFormatter. The LogLevel is ignored. +func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { + str := fmt.Sprint(entries...) + prefix := lf.prefix + if pkg != "" { + prefix = fmt.Sprintf("%s%s: ", prefix, pkg) + } + lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5 +} + +// Flush is included so that the interface is complete, but is a no-op. +func (lf *LogFormatter) Flush() { + // noop +} + +// NilFormatter is a no-op log formatter that does nothing. +type NilFormatter struct { +} + +// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no +// messages so that you can cause part of your logging to be silent. +func NewNilFormatter() Formatter { + return &NilFormatter{} +} + +// Format does nothing. +func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) { + // noop +} + +// Flush is included so that the interface is complete, but is a no-op. +func (_ *NilFormatter) Flush() { + // noop +} diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go new file mode 100644 index 000000000000..426603ef305c --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go @@ -0,0 +1,96 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var pid = os.Getpid()
+
+type GlogFormatter struct {
+	StringFormatter
+}
+
+func NewGlogFormatter(w io.Writer) *GlogFormatter {
+	g := &GlogFormatter{}
+	g.w = bufio.NewWriter(w)
+	return g
+}
+
+func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
+	g.w.Write(GlogHeader(level, depth+1))
+	g.StringFormatter.Format(pkg, level, depth+1, entries...)
+}
+
+func GlogHeader(level LogLevel, depth int) []byte {
+	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+	now := time.Now().UTC()
+	_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+	if !ok {
+		file = "???"
+		line = 1
+	} else {
+		slash := strings.LastIndex(file, "/")
+		if slash >= 0 {
+			file = file[slash+1:]
+		}
+	}
+	if line < 0 {
+		line = 0 // not a real line number
+	}
+	buf := &bytes.Buffer{}
+	buf.Grow(30)
+	_, month, day := now.Date()
+	hour, minute, second := now.Clock()
+	buf.WriteString(level.Char())
+	twoDigits(buf, int(month))
+	twoDigits(buf, day)
+	buf.WriteByte(' ')
+	twoDigits(buf, hour)
+	buf.WriteByte(':')
+	twoDigits(buf, minute)
+	buf.WriteByte(':')
+	twoDigits(buf, second)
+	buf.WriteByte('.')
+	buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
+	buf.WriteByte('Z')
+	buf.WriteByte(' ')
+	buf.WriteString(strconv.Itoa(pid))
+	buf.WriteByte(' ')
+	buf.WriteString(file)
+	buf.WriteByte(':')
+	buf.WriteString(strconv.Itoa(line))
+	buf.WriteByte(']')
+	buf.WriteByte(' ')
+	return buf.Bytes()
+}
+
+const digits = "0123456789"
+
+func twoDigits(b *bytes.Buffer, d int) {
+	c2 := digits[d%10]
+	d /= 10
+	c1 := digits[d%10]
+	b.WriteByte(c1)
+	b.WriteByte(c2)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go
new file mode 100644
index 000000000000..44b8cd361b0f
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/init.go
@@ -0,0 +1,49 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows

+package capnslog
+
+import (
+	"io"
+	"os"
+	"syscall"
+)
+
+// Here's where the opinionation comes in. We need some sensible defaults,
+// especially after taking over the log package. Your project (whatever it may
+// be) may see things differently. That's okay; there should be no defaults in
+// the main package that cannot be controlled or overridden programmatically,
+// otherwise it's a bug. The way to change them is to create your own
+// init_log.go file, much like this one.
+
+func init() {
+	initHijack()
+
+	// Go `log` package uses os.Stderr.
+	SetFormatter(NewDefaultFormatter(os.Stderr))
+	SetGlobalLogLevel(INFO)
+}
+
+func NewDefaultFormatter(out io.Writer) Formatter {
+	if syscall.Getppid() == 1 {
+		// We're running under init, which may be systemd.
+ f, err := NewJournaldFormatter() + if err == nil { + return f + } + } + return NewPrettyFormatter(out, false) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go new file mode 100644 index 000000000000..45530506537f --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init_windows.go @@ -0,0 +1,25 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import "os" + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewPrettyFormatter(os.Stderr, false)) + SetGlobalLogLevel(INFO) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go new file mode 100644 index 000000000000..72e05207c52c --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go @@ -0,0 +1,68 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/coreos/go-systemd/journal" +) + +func NewJournaldFormatter() (Formatter, error) { + if !journal.Enabled() { + return nil, errors.New("No systemd detected") + } + return &journaldFormatter{}, nil +} + +type journaldFormatter struct{} + +func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + var pri journal.Priority + switch l { + case CRITICAL: + pri = journal.PriCrit + case ERROR: + pri = journal.PriErr + case WARNING: + pri = journal.PriWarning + case NOTICE: + pri = journal.PriNotice + case INFO: + pri = journal.PriInfo + case DEBUG: + pri = journal.PriDebug + case TRACE: + pri = journal.PriDebug + default: + panic("Unhandled loglevel") + } + msg := fmt.Sprint(entries...) + tags := map[string]string{ + "PACKAGE": pkg, + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + } + err := journal.Send(msg, pri, tags) + if err != nil { + fmt.Fprintln(os.Stderr, err) + } +} + +func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go new file mode 100644 index 000000000000..970086b9f973 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go @@ -0,0 +1,39 @@ +// Copyright 2015 CoreOS, Inc. 
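Wiring up the glog-style formatter from glog_formatter.go above looks like this (editorial sketch; the repo path is assumed, and the sample output line only illustrates the Lmmdd hh:mm:ss.uuuuuuZ pid file:line] header shape that GlogHeader builds):

```go
package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

func main() {
	// Route all capnslog output through the glog-style header.
	capnslog.SetFormatter(capnslog.NewGlogFormatter(os.Stderr))

	log := capnslog.NewPackageLogger("github.com/example/repo", "main")
	log.Infof("starting up")
	// Illustrative output: I0210 14:03:07.123456Z 4242 pkg_logger.go:89] main: starting up
}
```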
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "log" +) + +func initHijack() { + pkg := NewPackageLogger("log", "") + w := packageWriter{pkg} + log.SetFlags(0) + log.SetPrefix("") + log.SetOutput(w) +} + +type packageWriter struct { + pl *PackageLogger +} + +func (p packageWriter) Write(b []byte) (int, error) { + if p.pl.level < INFO { + return 0, nil + } + p.pl.internalLog(calldepth+2, INFO, string(b)) + return len(b), nil +} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go new file mode 100644 index 000000000000..84954488308d --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go @@ -0,0 +1,240 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "errors" + "strings" + "sync" +) + +// LogLevel is the set of all log levels. +type LogLevel int8 + +const ( + // CRITICAL is the lowest log level; only errors which will end the program will be propagated. + CRITICAL LogLevel = iota - 1 + // ERROR is for errors that are not fatal but lead to troubling behavior. + ERROR + // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. + WARNING + // NOTICE is for normal but significant conditions. + NOTICE + // INFO is a log level for common, everyday log updates. + INFO + // DEBUG is the default hidden level for more verbose updates about internal processes. + DEBUG + // TRACE is for (potentially) call by call tracing of programs. + TRACE +) + +// Char returns a single-character representation of the log level. +func (l LogLevel) Char() string { + switch l { + case CRITICAL: + return "C" + case ERROR: + return "E" + case WARNING: + return "W" + case NOTICE: + return "N" + case INFO: + return "I" + case DEBUG: + return "D" + case TRACE: + return "T" + default: + panic("Unhandled loglevel") + } +} + +// String returns a multi-character representation of the log level. +func (l LogLevel) String() string { + switch l { + case CRITICAL: + return "CRITICAL" + case ERROR: + return "ERROR" + case WARNING: + return "WARNING" + case NOTICE: + return "NOTICE" + case INFO: + return "INFO" + case DEBUG: + return "DEBUG" + case TRACE: + return "TRACE" + default: + panic("Unhandled loglevel") + } +} + +// Update using the given string value. Fulfills the flag.Value interface. 
+func (l *LogLevel) Set(s string) error { + value, err := ParseLevel(s) + if err != nil { + return err + } + + *l = value + return nil +} + +// ParseLevel translates some potential loglevel strings into their corresponding levels. +func ParseLevel(s string) (LogLevel, error) { + switch s { + case "CRITICAL", "C": + return CRITICAL, nil + case "ERROR", "0", "E": + return ERROR, nil + case "WARNING", "1", "W": + return WARNING, nil + case "NOTICE", "2", "N": + return NOTICE, nil + case "INFO", "3", "I": + return INFO, nil + case "DEBUG", "4", "D": + return DEBUG, nil + case "TRACE", "5", "T": + return TRACE, nil + } + return CRITICAL, errors.New("couldn't parse log level " + s) +} + +type RepoLogger map[string]*PackageLogger + +type loggerStruct struct { + sync.Mutex + repoMap map[string]RepoLogger + formatter Formatter +} + +// logger is the global logger +var logger = new(loggerStruct) + +// SetGlobalLogLevel sets the log level for all packages in all repositories +// registered with capnslog. +func SetGlobalLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + for _, r := range logger.repoMap { + r.setRepoLogLevelInternal(l) + } +} + +// GetRepoLogger may return the handle to the repository's set of packages' loggers. +func GetRepoLogger(repo string) (RepoLogger, error) { + logger.Lock() + defer logger.Unlock() + r, ok := logger.repoMap[repo] + if !ok { + return nil, errors.New("no packages registered for repo " + repo) + } + return r, nil +} + +// MustRepoLogger returns the handle to the repository's packages' loggers. +func MustRepoLogger(repo string) RepoLogger { + r, err := GetRepoLogger(repo) + if err != nil { + panic(err) + } + return r +} + +// SetRepoLogLevel sets the log level for all packages in the repository. +func (r RepoLogger) SetRepoLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + r.setRepoLogLevelInternal(l) +} + +func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { + for _, v := range r { + v.level = l + } +} + +// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in +// order, and returns a map of the results, for use in SetLogLevel. +func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { + setlist := strings.Split(conf, ",") + out := make(map[string]LogLevel) + for _, setstring := range setlist { + setting := strings.Split(setstring, "=") + if len(setting) != 2 { + return nil, errors.New("oddly structured `pkg=level` option: " + setstring) + } + l, err := ParseLevel(setting[1]) + if err != nil { + return nil, err + } + out[setting[0]] = l + } + return out, nil +} + +// SetLogLevel takes a map of package names within a repository to their desired +// loglevel, and sets the levels appropriately. Unknown packages are ignored. +// "*" is a special package name that corresponds to all packages, and will be +// processed first. +func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { + logger.Lock() + defer logger.Unlock() + if l, ok := m["*"]; ok { + r.setRepoLogLevelInternal(l) + } + for k, v := range m { + l, ok := r[k] + if !ok { + continue + } + l.level = v + } +} + +// SetFormatter sets the formatting function for all logs. +func SetFormatter(f Formatter) { + logger.Lock() + defer logger.Unlock() + logger.formatter = f +} + +// NewPackageLogger creates a package logger object. +// This should be defined as a global var in your package, referencing your repo. 
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { + logger.Lock() + defer logger.Unlock() + if logger.repoMap == nil { + logger.repoMap = make(map[string]RepoLogger) + } + r, rok := logger.repoMap[repo] + if !rok { + logger.repoMap[repo] = make(RepoLogger) + r = logger.repoMap[repo] + } + p, pok := r[pkg] + if !pok { + r[pkg] = &PackageLogger{ + pkg: pkg, + level: INFO, + } + p = r[pkg] + } + return +} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go new file mode 100644 index 000000000000..612d55c66c80 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go @@ -0,0 +1,177 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "fmt" + "os" +) + +type PackageLogger struct { + pkg string + level LogLevel +} + +const calldepth = 2 + +func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { + logger.Lock() + defer logger.Unlock() + if inLevel != CRITICAL && p.level < inLevel { + return + } + if logger.formatter != nil { + logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) + } +} + +func (p *PackageLogger) LevelAt(l LogLevel) bool { + logger.Lock() + defer logger.Unlock() + return p.level >= l +} + +// Log a formatted string at any level between ERROR and TRACE +func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) +} + +// Log a message at any level between ERROR and TRACE +func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprint(args...)) +} + +// log stdlib compatibility + +func (p *PackageLogger) Println(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) +} + +func (p *PackageLogger) Printf(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Print(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprint(args...)) +} + +// Panic and fatal + +func (p *PackageLogger) Panicf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panic(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Fatalf(format string, args ...interface{}) { + p.Logf(CRITICAL, format, args...) + os.Exit(1) +} + +func (p *PackageLogger) Fatal(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +func (p *PackageLogger) Fatalln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +// Error Functions + +func (p *PackageLogger) Errorf(format string, args ...interface{}) { + p.Logf(ERROR, format, args...) 
+} + +func (p *PackageLogger) Error(entries ...interface{}) { + p.internalLog(calldepth, ERROR, entries...) +} + +// Warning Functions + +func (p *PackageLogger) Warningf(format string, args ...interface{}) { + p.Logf(WARNING, format, args...) +} + +func (p *PackageLogger) Warning(entries ...interface{}) { + p.internalLog(calldepth, WARNING, entries...) +} + +// Notice Functions + +func (p *PackageLogger) Noticef(format string, args ...interface{}) { + p.Logf(NOTICE, format, args...) +} + +func (p *PackageLogger) Notice(entries ...interface{}) { + p.internalLog(calldepth, NOTICE, entries...) +} + +// Info Functions + +func (p *PackageLogger) Infof(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Info(entries ...interface{}) { + p.internalLog(calldepth, INFO, entries...) +} + +// Debug Functions + +func (p *PackageLogger) Debugf(format string, args ...interface{}) { + if p.level < DEBUG { + return + } + p.Logf(DEBUG, format, args...) +} + +func (p *PackageLogger) Debug(entries ...interface{}) { + if p.level < DEBUG { + return + } + p.internalLog(calldepth, DEBUG, entries...) +} + +// Trace Functions + +func (p *PackageLogger) Tracef(format string, args ...interface{}) { + if p.level < TRACE { + return + } + p.Logf(TRACE, format, args...) +} + +func (p *PackageLogger) Trace(entries ...interface{}) { + if p.level < TRACE { + return + } + p.internalLog(calldepth, TRACE, entries...) +} + +func (p *PackageLogger) Flush() { + logger.Lock() + defer logger.Unlock() + logger.formatter.Flush() +} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go new file mode 100644 index 000000000000..4be5a1f2de39 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go @@ -0,0 +1,65 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// +build !windows + +package capnslog + +import ( + "fmt" + "log/syslog" +) + +func NewSyslogFormatter(w *syslog.Writer) Formatter { + return &syslogFormatter{w} +} + +func NewDefaultSyslogFormatter(tag string) (Formatter, error) { + w, err := syslog.New(syslog.LOG_DEBUG, tag) + if err != nil { + return nil, err + } + return NewSyslogFormatter(w), nil +} + +type syslogFormatter struct { + w *syslog.Writer +} + +func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + for _, entry := range entries { + str := fmt.Sprint(entry) + switch l { + case CRITICAL: + s.w.Crit(str) + case ERROR: + s.w.Err(str) + case WARNING: + s.w.Warning(str) + case NOTICE: + s.w.Notice(str) + case INFO: + s.w.Info(str) + case DEBUG: + s.w.Debug(str) + case TRACE: + s.w.Debug(str) + default: + panic("Unhandled loglevel") + } + } +} + +func (s *syslogFormatter) Flush() { +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 000000000000..c836416192da --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins <dave@davec.name> + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/BUILD b/vendor/github.com/davecgh/go-spew/spew/BUILD new file mode 100644 index 000000000000..ffc25d9cb9b6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "bypass.go", + "common.go", + "config.go", + "doc.go", + "dump.go", + "format.go", + "spew.go", + ], + importpath = "github.com/davecgh/go-spew/spew", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 000000000000..8a4a6589a2d4 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,152 @@ +// Copyright (c) 2015-2016 Dave Collins <dave@davec.name> +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. The are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. + offsetPtr = uintptr(ptrSize) + offsetScalar = uintptr(0) + offsetFlag = uintptr(ptrSize * 2) + + // flagKindWidth and flagKindShift indicate various bits that the + // reflect package uses internally to track kind information. + // + // flagRO indicates whether or not the value field of a reflect.Value is + // read-only. + // + // flagIndir indicates whether the value field of a reflect.Value is + // the actual data or a pointer to the data. + // + // These values are valid before golang commit 90a7c3c86944 which + // changed their positions. Code in the init function updates these + // flags as necessary. + flagKindWidth = uintptr(5) + flagKindShift = uintptr(flagKindWidth - 1) + flagRO = uintptr(1 << 0) + flagIndir = uintptr(1 << 1) +) + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Versions + // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named + // scalar for this purpose which unfortunately came before the flag + // field, so the offset of the flag field is different for those + // versions. + // + // This code constructs a new reflect.Value from a known small integer + // and checks if the size of the reflect.Value struct indicates it has + // the scalar field. When it does, the offsets are updated accordingly. + vv := reflect.ValueOf(0xf00) + if unsafe.Sizeof(vv) == (ptrSize * 4) { + offsetScalar = ptrSize * 2 + offsetFlag = ptrSize * 3 + } + + // Commit 90a7c3c86944 changed the flag positions such that the low + // order bits are the kind. This code extracts the kind from the flags + // field and ensures it's the correct type. When it's not, the flag + // order has been changed to the newer format, so the flags are updated + // accordingly. 
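+ // (The probe value vv was built above with reflect.ValueOf(0xf00), so its + // kind is known to be reflect.Int; extracting any other kind from the flag + // bits means the layout has changed.)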
+ upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) + upfv := *(*uintptr)(upf) + flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) + if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) { + flagKindShift = 0 + flagRO = 1 << 5 + flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. + // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. + if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } + } +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. + switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 000000000000..1fe3cf3d5d10 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins <dave@davec.name> +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 000000000000..7c519ff47ac3 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. +var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("<nil>") + maxNewlineBytes = []byte("<max depth reached>\n") + maxShortBytes = []byte("<max>") + circularBytes = []byte("<already shown>") + circularShortBytes = []byte("<shown>") + invalidAngleBytes = []byte("<invalid>") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls.
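+// +// It is meant to be installed with defer around calls that may panic, as +// handleMethods does below: +// +// defer catchPanic(w, v) +// w.Write([]byte(iface.String()))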
+func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// prefix to Writer w. 
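+// +// For example, printHexPtr(w, uintptr(0xf00)) writes "0xf00", while a zero +// pointer is written as "<nil>".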
+func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. 
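+// +// For instance, uint values compare numerically, strings lexically, and +// arrays element by element, stopping at the first unequal pair.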
+func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 000000000000..2e3d22f31202 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) 
+} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 000000000000..aacaac6f1e1e --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. + +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options.
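+ +As a minimal, illustrative sketch (the pair struct below is defined by the +caller, not by spew): + + type pair struct { + Name string + Count int + } + spew.Dump(pair{"widgets", 2}) // multi-line dump style + spew.Printf("%+v\n", pair{"widgets", 2}) // compact inline style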
+ +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. 
+ + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) <nil> + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*><shown>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 000000000000..df1d582a728a --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. 
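+ // (This loop is what renders chains such as (0xf84002e210) or, with double + // indirection, addr1->addr2 in the package examples; actual addresses are + // run-dependent.)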
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. 
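+ // (dumpPtr also records visited addresses by depth, which is how circular + // structures end up reported as <already shown> rather than recursing + // forever.)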
+ if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. 
See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 000000000000..c49875bacbb8 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. +func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound == true:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound == true:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. 
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 000000000000..32c0e3388253
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore new file mode 100644 index 000000000000..80bed650ec03 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin + + diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml new file mode 100644 index 000000000000..1027f56cd94d --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/.travis.yml @@ -0,0 +1,13 @@ +language: go + +script: + - go vet ./... + - go test -v ./... + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/BUILD b/vendor/github.com/dgrijalva/jwt-go/BUILD new file mode 100644 index 000000000000..fd4939a3d31d --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/BUILD @@ -0,0 +1,37 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "claims.go", + "doc.go", + "ecdsa.go", + "ecdsa_utils.go", + "errors.go", + "hmac.go", + "map_claims.go", + "none.go", + "parser.go", + "rsa.go", + "rsa_pss.go", + "rsa_utils.go", + "signing_method.go", + "token.go", + ], + importpath = "github.com/dgrijalva/jwt-go", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE new file mode 100644 index 000000000000..df83a9c2f019 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
new file mode 100644
index 000000000000..7fc1f793cbc4
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
@@ -0,0 +1,97 @@
+## Migration Guide from v2 -> v3
+
+Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
+
+### `Token.Claims` is now an interface type
+
+The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
+
+`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property.
+
+The old example for parsing a token looked like this:
+
+```go
+ if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+ fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+ }
+```
+
+is now directly mapped to:
+
+```go
+ if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+ claims := token.Claims.(jwt.MapClaims)
+ fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+ }
+```
+
+`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
+
+```go
+ type MyCustomClaims struct {
+ User string
+ *StandardClaims
+ }
+
+ if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
+ claims := token.Claims.(*MyCustomClaims)
+ fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
+ }
+```
+
+### `ParseFromRequest` has been moved
+
+To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.
+
+`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
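For orientation, a minimal custom extractor might look like the sketch below, assuming the `Extractor` contract is the single-method `ExtractToken(*http.Request) (string, error)` shape used by the `request` subpackage; the cookie-based source and error text are illustrative, not part of the library:

```go
package example

import (
	"errors"
	"net/http"
)

// cookieExtractor is a hypothetical Extractor that reads the token string
// from a named cookie. Satisfying the interface only requires ExtractToken.
type cookieExtractor string

func (e cookieExtractor) ExtractToken(req *http.Request) (string, error) {
	cookie, err := req.Cookie(string(e))
	if err != nil || cookie.Value == "" {
		return "", errors.New("no token present in request")
	}
	return cookie.Value, nil
}
```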
+
+This simple parsing example:
+
+```go
+ if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil {
+ fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+ }
+```
+
+is directly mapped to:
+
+```go
+ if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
+ claims := token.Claims.(jwt.MapClaims)
+ fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+ }
+```
+
+There are several concrete `Extractor` types provided for your convenience:
+
+* `HeaderExtractor` will search a list of headers until one contains content.
+* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
+* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
+* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
+* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): the `Authorization` header and the `access_token` argument.
+* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header.
+
+
+### RSA signing methods no longer accept `[]byte` keys
+
+Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
+
+To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
+
+```go
+ func keyLookupFunc(token *jwt.Token) (interface{}, error) {
+ // Don't forget to validate the alg is what you expect:
+ if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
+ return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
+ }
+
+ // Look up key
+ key, err := lookupPublicKey(token.Header["kid"])
+ if err != nil {
+ return nil, err
+ }
+
+ // Unpack key from PEM encoded PKCS8
+ return jwt.ParseRSAPublicKeyFromPEM(key)
+ }
+```
diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md
new file mode 100644
index 000000000000..25aec486c634
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/README.md
@@ -0,0 +1,85 @@
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+
+[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+
+**BREAKING CHANGES:** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+**NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/).
This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method was used and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Examples
+
+See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
+* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
+* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`; a minimal sketch appears below.
+
+Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package path instead: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning.
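To make the extension point above concrete: a custom signing method only has to satisfy the three-method `SigningMethod` interface (`Alg`, `Sign`, `Verify`, as used by the ECDSA and HMAC implementations in this vendored code) and register a factory. Here is a minimal sketch; the `NOOP` method is illustrative only, performs no real cryptography, and should never be used outside an example:

```go
package example

import jwt "github.com/dgrijalva/jwt-go"

// signingMethodNoop is an illustrative SigningMethod that produces an empty
// signature and accepts any signature. A real extension would call actual
// cryptographic primitives in Sign and Verify.
type signingMethodNoop struct{}

func (m *signingMethodNoop) Alg() string { return "NOOP" }

// Sign returns an empty (base64url-encoded) signature segment.
func (m *signingMethodNoop) Sign(signingString string, key interface{}) (string, error) {
	return jwt.EncodeSegment([]byte{}), nil
}

// Verify accepts everything; illustrative only.
func (m *signingMethodNoop) Verify(signingString, signature string, key interface{}) error {
	return nil
}

func init() {
	// Register a factory so Parse can find the method by its "alg" header value.
	jwt.RegisterSigningMethod("NOOP", func() jwt.SigningMethod {
		return &signingMethodNoop{}
	})
}
```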
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc.), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+## More
+
+Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
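To tie the pieces of this vendored API together, here is a compact, self-contained sketch of signing and then parsing an HS256 token; the secret and claim values are illustrative, and the parse callback checks the method type before releasing the key, as the NOTICE above recommends:

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("illustrative-secret") // never hard-code a real secret

	// Build and sign a token using map-based claims.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"user": "alice",
		"exp":  time.Now().Add(time.Hour).Unix(),
	})
	signed, err := token.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Parse it back, validating the alg before handing out the key.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
	if err == nil && parsed.Valid {
		claims := parsed.Claims.(jwt.MapClaims)
		fmt.Println("user:", claims["user"])
	}
}
```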
diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md new file mode 100644 index 000000000000..c21551f6bbdd --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,111 @@ +## `jwt-go` Version History + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. 
Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+ * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+ * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+ * `KeyFunc` now returns `interface{}` instead of `[]byte`
+ * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+ * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodHS256`
+ * Added public package global `SigningMethodHS384`
+ * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodRS256`
+ * Added public package global `SigningMethodRS384`
+ * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation.
No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go new file mode 100644 index 000000000000..f0228f02e033 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/claims.go @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ return verifyNbf(c.NotBefore, cmp, req)
+}
+
+// ----- helpers
+
+func verifyAud(aud string, cmp string, required bool) bool {
+ if aud == "" {
+ return !required
+ }
+ if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
+ return true
+ } else {
+ return false
+ }
+}
+
+func verifyExp(exp int64, now int64, required bool) bool {
+ if exp == 0 {
+ return !required
+ }
+ return now <= exp
+}
+
+func verifyIat(iat int64, now int64, required bool) bool {
+ if iat == 0 {
+ return !required
+ }
+ return now >= iat
+}
+
+func verifyIss(iss string, cmp string, required bool) bool {
+ if iss == "" {
+ return !required
+ }
+ if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
+ return true
+ } else {
+ return false
+ }
+}
+
+func verifyNbf(nbf int64, now int64, required bool) bool {
+ if nbf == 0 {
+ return !required
+ }
+ return now >= nbf
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go
new file mode 100644
index 000000000000..a86dc1a3b348
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
new file mode 100644
index 000000000000..2f59a2223638
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
@@ -0,0 +1,147 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "errors"
+ "math/big"
+)
+
+var (
+ // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+ ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// Implements the ECDSA family of signing methods
+type SigningMethodECDSA struct {
+ Name string
+ Hash crypto.Hash
+ KeySize int
+ CurveBits int
+}
+
+// Specific instances for ES256 and company
+var (
+ SigningMethodES256 *SigningMethodECDSA
+ SigningMethodES384 *SigningMethodECDSA
+ SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+ // ES256
+ SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+ RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+ return SigningMethodES256
+ })
+
+ // ES384
+ SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+ RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+ return SigningMethodES384
+ })
+
+ // ES512
+ SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+ RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+ return SigningMethodES512
+ })
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+ return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an ecdsa.PublicKey struct
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ // Get the key
+ var ecdsaKey *ecdsa.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ ecdsaKey = k
+ default:
+ return ErrInvalidKeyType
+ }
+
+ if len(sig) != 2*m.KeySize {
+ return ErrECDSAVerification
+ }
+
+ r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+ s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
+ return nil
+ } else {
+ return ErrECDSAVerification
+ }
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an ecdsa.PrivateKey struct
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+ // Get the key
+ var ecdsaKey *ecdsa.PrivateKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ ecdsaKey = k
+ default:
+ return "", ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return r, s
+ if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+ curveBits := ecdsaKey.Curve.Params().BitSize
+
+ if m.CurveBits != curveBits {
+ return "", ErrInvalidKey
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes += 1
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out. Both arrays
+ // must be keyBytes long, and the output must be 2*keyBytes long.
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return EncodeSegment(out), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
new file mode 100644
index 000000000000..d19624b7264f
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
@@ -0,0 +1,67 @@
+package jwt
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
+)
+
+// Parse PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey *ecdsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, ErrNotECPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// Parse a PEM encoded PKIX public key (or certificate)
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, ErrNotECPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go
new file mode 100644
index 000000000000..1c93024aad2e
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/errors.go
@@ -0,0 +1,59 @@
+package jwt
+
+import (
+ "errors"
+)
+
+// Error constants
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+)
+
+// The errors that might occur when parsing and validating a token
+const (
+ ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
+ ValidationErrorUnverifiable // Token could not be verified because of signing problems
+ ValidationErrorSignatureInvalid // Signature validation failed
+
+ // Standard Claim validation errors
+ ValidationErrorAudience // AUD validation failed
+ ValidationErrorExpired // EXP validation failed
+ ValidationErrorIssuedAt // IAT validation failed
+ ValidationErrorIssuer // ISS validation failed
+ ValidationErrorNotValidYet // NBF validation failed
+ ValidationErrorId // JTI validation failed
+ ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// Helper for constructing a ValidationError with a string error message
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+ return &ValidationError{
+ text: errorText,
+ Errors: errorFlags,
+ }
+}
+
+// The error from Parse if token is not valid
+type ValidationError struct {
+ Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
+ Errors uint32 // bitfield. see ValidationError... constants
+ text string // errors that do not have a valid error just have text
+}
+
+// Validation error is an error type
+func (e ValidationError) Error() string {
+ if e.Inner != nil {
+ return e.Inner.Error()
+ } else if e.text != "" {
+ return e.text
+ } else {
+ return "token is invalid"
+ }
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+ return e.Errors == 0
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go
new file mode 100644
index 000000000000..c22991925455
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go
@@ -0,0 +1,94 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/hmac"
+ "errors"
+)
+
+// Implements the HMAC-SHA family of signing methods
+type SigningMethodHMAC struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+ SigningMethodHS256 *SigningMethodHMAC
+ SigningMethodHS384 *SigningMethodHMAC
+ SigningMethodHS512 *SigningMethodHMAC
+ ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+ // HS256
+ SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+ return SigningMethodHS256
+ })
+
+ // HS384
+ SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+ return SigningMethodHS384
+ })
+
+ // HS512
+ SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+ return SigningMethodHS512
+ })
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+ return m.Name
+}
+
+// Verify the signature of HSXXX tokens.
Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. +// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKey +} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go new file mode 100644 index 000000000000..291213c460d4 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, _ := m["aud"].(string) + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go new file mode 100644 index 000000000000..f04d189d067b --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. 
+ return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 000000000000..7bf1c4ea0842 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,131 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + parts := strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + var err error + token := &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. 
Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go new file mode 100644 index 000000000000..0ae0b1984e5a --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -0,0 +1,100 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// 
Implements the Verify method from SigningMethod +// For this signing method, must be an rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go new file mode 100644 index 000000000000..10ee9db8a4ed --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: 
+ return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 000000000000..213a90dbbf8d --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 000000000000..ed1f212b21e1 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. 
+type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 000000000000..d637e0867c65 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. 
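+// For illustration only (an editor's sketch, not upstream code): end-to-end
+// use of the pieces above, signing with HS256 and parsing the result back,
+// where `mySecret` is a hypothetical []byte key:
+//
+//	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
+//		"sub": "1234567890",
+//		"exp": time.Now().Add(time.Hour).Unix(),
+//	})
+//	signed, err := tok.SignedString(mySecret) // "header.claims.signature"
+//	if err != nil {
+//		// handle signing error
+//	}
+//	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
+//		return mySecret, nil // Keyfunc: choose the key, e.g. via t.Header["kid"]
+//	})
+//	_ = parsed
+//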
+func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/elazarl/goproxy/.gitignore b/vendor/github.com/elazarl/goproxy/.gitignore new file mode 100644 index 000000000000..1005f6f1ecd6 --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/.gitignore @@ -0,0 +1,2 @@ +bin +*.swp diff --git a/vendor/github.com/elazarl/goproxy/BUILD b/vendor/github.com/elazarl/goproxy/BUILD new file mode 100644 index 000000000000..499ca6a9453d --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "actions.go", + "certs.go", + "chunked.go", + "counterecryptor.go", + "ctx.go", + "dispatcher.go", + "doc.go", + "https.go", + "proxy.go", + "responses.go", + "signer.go", + ], + importpath = "github.com/elazarl/goproxy", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/elazarl/goproxy/LICENSE b/vendor/github.com/elazarl/goproxy/LICENSE new file mode 100644 index 000000000000..2067e567c9fe --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Elazar Leibovich. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Elazar Leibovich. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/elazarl/goproxy/README.md b/vendor/github.com/elazarl/goproxy/README.md
new file mode 100644
index 000000000000..209786a15647
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/README.md
@@ -0,0 +1,112 @@
+# Introduction
+
+Package goproxy provides a customizable HTTP proxy library for Go (golang).
+
+It supports a regular HTTP proxy, HTTPS through CONNECT, and "hijacking" an HTTPS
+connection using a "Man in the Middle" style attack.
+
+The proxy is intended to be usable with a reasonable amount of traffic,
+yet remain customizable and programmable.
+
+The proxy itself is simply a `net/http` handler.
+
+In order to use goproxy, one should set their browser to use goproxy as an HTTP
+proxy. Here is how you do that [in Chrome](https://support.google.com/chrome/answer/96815?hl=en)
+and [in Firefox](http://www.wikihow.com/Enter-Proxy-Settings-in-Firefox).
+
+For example, the URL you should use as proxy when running `./bin/basic` is
+`localhost:8080`, as this is the default binding for the basic proxy.
+
+## Mailing List
+
+New features are discussed on the [mailing list](https://groups.google.com/forum/#!forum/goproxy-dev)
+before they are developed.
+
+## Latest Stable Release
+
+Get the latest goproxy from `gopkg.in/elazarl/goproxy.v1`.
+
+# Why not Fiddler2?
+
+Fiddler is excellent software with a similar intent. However, Fiddler is not
+as customizable as goproxy intends to be. The main difference is that Fiddler is not
+intended to be used as a real proxy.
+
+A possible use case that suits goproxy but
+not Fiddler is gathering statistics on page load times for a certain website over a week.
+With goproxy you could ask all your users to set their proxy to a dedicated machine running a
+goproxy server. Fiddler is a GUI app not designed to be run as a server for multiple users.
+
+# A taste of goproxy
+
+To get a taste of `goproxy`, here is a basic HTTP/HTTPS transparent proxy:
+
+
+    import (
+        "github.com/elazarl/goproxy"
+        "log"
+        "net/http"
+    )
+
+    func main() {
+        proxy := goproxy.NewProxyHttpServer()
+        proxy.Verbose = true
+        log.Fatal(http.ListenAndServe(":8080", proxy))
+    }
+
+
+This snippet will add an `X-GoProxy: yxorPoG-X` header to all requests sent through the proxy:
+
+    proxy.OnRequest().DoFunc(
+        func(r *http.Request,ctx *goproxy.ProxyCtx)(*http.Request,*http.Response) {
+            r.Header.Set("X-GoProxy","yxorPoG-X")
+            return r,nil
+        })
+
+`DoFunc` will process all incoming requests to the proxy. It will add a header to the request
+and return it. The proxy will send the modified request.
+
+Note that we returned a nil value as the response. Had we returned a response, goproxy would
+have discarded the request and sent the new response to the client.
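+
+Responses can be filtered the same way. As a sketch (an editor's addition,
+assuming the proxy set up above), this handler tags every response that
+passes through the proxy:
+
+    proxy.OnResponse().DoFunc(
+        func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
+            if resp != nil { // resp can be nil if sending the request failed
+                resp.Header.Set("X-GoProxy", "1")
+            }
+            return resp
+        })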
+
+In order to refuse connections to reddit during work hours:
+
+    proxy.OnRequest(goproxy.DstHostIs("www.reddit.com")).DoFunc(
+        func(r *http.Request,ctx *goproxy.ProxyCtx)(*http.Request,*http.Response) {
+            if h,_,_ := time.Now().Clock(); h >= 8 && h <= 17 {
+                return r,goproxy.NewResponse(r,
+                    goproxy.ContentTypeText,http.StatusForbidden,
+                    "Don't waste your time!")
+            }
+            return r,nil
+        })
+
+`DstHostIs` returns a `ReqCondition`, that is, a function receiving a `Request` and returning a boolean;
+we will only process requests that match the condition. `DstHostIs("www.reddit.com")` will return
+a `ReqCondition` accepting only requests directed to "www.reddit.com".
+
+`DoFunc` will receive a function that will preprocess the request. We can change the request, or
+return a response. If the time is between 8:00am and 17:00, we will reject the request, and
+return a precanned text response saying "do not waste your time".
+
+See additional examples in the examples directory.
+
+# What's New
+
+ 1. Ability to `Hijack` CONNECT requests. See
+[the eavesdropper example](https://github.com/elazarl/goproxy/blob/master/examples/eavesdropper/main.go#L17)
+
+# License
+
+I put the software temporarily under the Go-compatible BSD license.
+If this prevents someone from using the software, do let me know and I'll consider changing it.
+
+At any rate, user feedback is very important to me, so I'll be delighted to know if you're using this package.
+
+# Beta Software
+
+I've received positive feedback from a few people who use goproxy in production settings.
+I believe it is good enough for use.
+
+I'll try to keep reasonable backwards compatibility. In case of a major API change,
+I'll change the import path.
diff --git a/vendor/github.com/elazarl/goproxy/actions.go b/vendor/github.com/elazarl/goproxy/actions.go
new file mode 100644
index 000000000000..e1a3e7ff17e7
--- /dev/null
+++ b/vendor/github.com/elazarl/goproxy/actions.go
@@ -0,0 +1,57 @@
+package goproxy
+
+import "net/http"
+
+// ReqHandler will "tamper" with the request coming to the proxy server.
+// If Handle returns req,nil the proxy will send the returned request
+// to the destination server. If it returns nil,resp the proxy will
+// skip sending any requests, and will simply return the response `resp`
+// to the client.
+type ReqHandler interface {
+	Handle(req *http.Request, ctx *ProxyCtx) (*http.Request, *http.Response)
+}
+
+// A wrapper that would convert a function to a ReqHandler interface type
+type FuncReqHandler func(req *http.Request, ctx *ProxyCtx) (*http.Request, *http.Response)
+
+// FuncReqHandler.Handle(req,ctx) <=> FuncReqHandler(req,ctx)
+func (f FuncReqHandler) Handle(req *http.Request, ctx *ProxyCtx) (*http.Request, *http.Response) {
+	return f(req, ctx)
+}
+
+// After the proxy has sent the request to the destination server, it will
+// "filter" the response through the RespHandlers it has.
+// The proxy server will send to the client the response returned by the RespHandler.
+// In case of error, resp will be nil, and ctx.RoundTrip.Error will contain the error +type RespHandler interface { + Handle(resp *http.Response, ctx *ProxyCtx) *http.Response +} + +// A wrapper that would convert a function to a RespHandler interface type +type FuncRespHandler func(resp *http.Response, ctx *ProxyCtx) *http.Response + +// FuncRespHandler.Handle(req,ctx) <=> FuncRespHandler(req,ctx) +func (f FuncRespHandler) Handle(resp *http.Response, ctx *ProxyCtx) *http.Response { + return f(resp, ctx) +} + +// When a client send a CONNECT request to a host, the request is filtered through +// all the HttpsHandlers the proxy has, and if one returns true, the connection is +// sniffed using Man in the Middle attack. +// That is, the proxy will create a TLS connection with the client, another TLS +// connection with the destination the client wished to connect to, and would +// send back and forth all messages from the server to the client and vice versa. +// The request and responses sent in this Man In the Middle channel are filtered +// through the usual flow (request and response filtered through the ReqHandlers +// and RespHandlers) +type HttpsHandler interface { + HandleConnect(req string, ctx *ProxyCtx) (*ConnectAction, string) +} + +// A wrapper that would convert a function to a HttpsHandler interface type +type FuncHttpsHandler func(host string, ctx *ProxyCtx) (*ConnectAction, string) + +// FuncHttpsHandler should implement the RespHandler interface +func (f FuncHttpsHandler) HandleConnect(host string, ctx *ProxyCtx) (*ConnectAction, string) { + return f(host, ctx) +} diff --git a/vendor/github.com/elazarl/goproxy/all.bash b/vendor/github.com/elazarl/goproxy/all.bash new file mode 100755 index 000000000000..6503e73dc929 --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/all.bash @@ -0,0 +1,15 @@ +#!/bin/bash + +go test || exit +for action in $@; do go $action; done + +mkdir -p bin +find regretable examples/* ext/* -maxdepth 0 -type d | while read d; do + (cd $d + go build -o ../../bin/$(basename $d) + find *_test.go -maxdepth 0 2>/dev/null|while read f;do + for action in $@; do go $action; done + go test + break + done) +done diff --git a/vendor/github.com/elazarl/goproxy/ca.pem b/vendor/github.com/elazarl/goproxy/ca.pem new file mode 100644 index 000000000000..f13842493227 --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/ca.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSjCCAbWgAwIBAgIBADALBgkqhkiG9w0BAQUwSjEjMCEGA1UEChMaZ2l0aHVi +LmNvbS9lbGF6YXJsL2dvcHJveHkxIzAhBgNVBAMTGmdpdGh1Yi5jb20vZWxhemFy +bC9nb3Byb3h5MB4XDTAwMDEwMTAwMDAwMFoXDTQ5MTIzMTIzNTk1OVowSjEjMCEG +A1UEChMaZ2l0aHViLmNvbS9lbGF6YXJsL2dvcHJveHkxIzAhBgNVBAMTGmdpdGh1 +Yi5jb20vZWxhemFybC9nb3Byb3h5MIGdMAsGCSqGSIb3DQEBAQOBjQAwgYkCgYEA +vz9BbCaJjxs73Tvcq3leP32hAGerQ1RgvlZ68Z4nZmoVHfl+2Nr/m0dmW+GdOfpT +cs/KzfJjYGr/84x524fiuR8GdZ0HOtXJzyF5seoWnbBIuyr1PbEpgRhGQMqqOUuj +YExeLbfNHPIoJ8XZ1Vzyv3YxjbmjWA+S/uOe9HWtDbMCAwEAAaNGMEQwDgYDVR0P +AQH/BAQDAgCkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEwEB/wQFMAMBAf8w +DAYDVR0RBAUwA4IBKjALBgkqhkiG9w0BAQUDgYEAIcL8huSmGMompNujsvePTUnM +oEUKtX4Eh/+s+DSfV/TyI0I+3GiPpLplEgFWuoBIJGios0r1dKh5N0TGjxX/RmGm +qo7E4jjJuo8Gs5U8/fgThZmshax2lwLtbRNwhvUVr65GdahLsZz8I+hySLuatVvR +qHHq/FQORIiNyNpq/Hg= +-----END CERTIFICATE----- diff --git a/vendor/github.com/elazarl/goproxy/certs.go b/vendor/github.com/elazarl/goproxy/certs.go new file mode 100644 index 000000000000..b485e2cc3450 --- /dev/null +++ 
b/vendor/github.com/elazarl/goproxy/certs.go @@ -0,0 +1,56 @@ +package goproxy + +import ( + "crypto/tls" + "crypto/x509" +) + +func init() { + if goproxyCaErr != nil { + panic("Error parsing builtin CA " + goproxyCaErr.Error()) + } + var err error + if GoproxyCa.Leaf, err = x509.ParseCertificate(GoproxyCa.Certificate[0]); err != nil { + panic("Error parsing builtin CA " + err.Error()) + } +} + +var tlsClientSkipVerify = &tls.Config{InsecureSkipVerify: true} + +var defaultTlsConfig = &tls.Config{ + InsecureSkipVerify: true, +} + +var CA_CERT = []byte(`-----BEGIN CERTIFICATE----- +MIICSjCCAbWgAwIBAgIBADALBgkqhkiG9w0BAQUwSjEjMCEGA1UEChMaZ2l0aHVi +LmNvbS9lbGF6YXJsL2dvcHJveHkxIzAhBgNVBAMTGmdpdGh1Yi5jb20vZWxhemFy +bC9nb3Byb3h5MB4XDTAwMDEwMTAwMDAwMFoXDTQ5MTIzMTIzNTk1OVowSjEjMCEG +A1UEChMaZ2l0aHViLmNvbS9lbGF6YXJsL2dvcHJveHkxIzAhBgNVBAMTGmdpdGh1 +Yi5jb20vZWxhemFybC9nb3Byb3h5MIGdMAsGCSqGSIb3DQEBAQOBjQAwgYkCgYEA +vz9BbCaJjxs73Tvcq3leP32hAGerQ1RgvlZ68Z4nZmoVHfl+2Nr/m0dmW+GdOfpT +cs/KzfJjYGr/84x524fiuR8GdZ0HOtXJzyF5seoWnbBIuyr1PbEpgRhGQMqqOUuj +YExeLbfNHPIoJ8XZ1Vzyv3YxjbmjWA+S/uOe9HWtDbMCAwEAAaNGMEQwDgYDVR0P +AQH/BAQDAgCkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEwEB/wQFMAMBAf8w +DAYDVR0RBAUwA4IBKjALBgkqhkiG9w0BAQUDgYEAIcL8huSmGMompNujsvePTUnM +oEUKtX4Eh/+s+DSfV/TyI0I+3GiPpLplEgFWuoBIJGios0r1dKh5N0TGjxX/RmGm +qo7E4jjJuo8Gs5U8/fgThZmshax2lwLtbRNwhvUVr65GdahLsZz8I+hySLuatVvR +qHHq/FQORIiNyNpq/Hg= +-----END CERTIFICATE-----`) + +var CA_KEY = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQC/P0FsJomPGzvdO9yreV4/faEAZ6tDVGC+VnrxnidmahUd+X7Y +2v+bR2Zb4Z05+lNyz8rN8mNgav/zjHnbh+K5HwZ1nQc61cnPIXmx6hadsEi7KvU9 +sSmBGEZAyqo5S6NgTF4tt80c8ignxdnVXPK/djGNuaNYD5L+4570da0NswIDAQAB +AoGBALzIv1b4D7ARTR3NOr6V9wArjiOtMjUrdLhO+9vIp9IEA8ZsA9gjDlCEwbkP +VDnoLjnWfraff5Os6+3JjHy1fYpUiCdnk2XA6iJSL1XWKQZPt3wOunxP4lalDgED +QTRReFbA/y/Z4kSfTXpVj68ytcvSRW/N7q5/qRtbN9804jpBAkEA0s6lvH2btSLA +mcEdwhs7zAslLbdld7rvfUeP82gPPk0S6yUqTNyikqshM9AwAktHY7WvYdKl+ghZ +HTxKVC4DoQJBAOg/IAW5RbXknP+Lf7AVtBgw3E+Yfa3mcdLySe8hjxxyZq825Zmu +Rt5Qj4Lw6ifSFNy4kiiSpE/ZCukYvUXGENMCQFkPxSWlS6tzSzuqQxBGwTSrYMG3 +wb6b06JyIXcMd6Qym9OMmBpw/J5KfnSNeDr/4uFVWQtTG5xO+pdHaX+3EQECQQDl +qcbY4iX1gWVfr2tNjajSYz751yoxVbkpiT9joiQLVXYFvpu+JYEfRzsjmWl0h2Lq +AftG8/xYmaEYcMZ6wSrRAkBUwiom98/8wZVlB6qbwhU1EKDFANvICGSWMIhPx3v7 +MJqTIj4uJhte2/uyVvZ6DC6noWYgy+kLgqG0S97tUEG8 +-----END RSA PRIVATE KEY-----`) + +var GoproxyCa, goproxyCaErr = tls.X509KeyPair(CA_CERT, CA_KEY) diff --git a/vendor/github.com/elazarl/goproxy/chunked.go b/vendor/github.com/elazarl/goproxy/chunked.go new file mode 100644 index 000000000000..83654f658630 --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/chunked.go @@ -0,0 +1,59 @@ +// Taken from $GOROOT/src/pkg/net/http/chunked +// needed to write https responses to client. +package goproxy + +import ( + "io" + "strconv" +) + +// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP +// "chunked" format before writing them to w. Closing the returned chunkedWriter +// sends the final 0-length chunk that marks the end of the stream. +// +// newChunkedWriter is not needed by normal applications. The http +// package adds chunking automatically if handlers don't set a +// Content-Length header. Using newChunkedWriter inside a handler +// would result in double chunking or chunking with a Content-Length +// length, both of which are wrong. 
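+//
+// For illustration (an editor's note, not upstream text): with this framing,
+// writing the five bytes "hello" emits the chunk length in hex, a CRLF, the
+// data, and a trailing CRLF; Close emits the terminating zero-length chunk:
+//
+//	var buf bytes.Buffer
+//	cw := newChunkedWriter(&buf)
+//	cw.Write([]byte("hello")) // buf now holds "5\r\nhello\r\n"
+//	cw.Close()                // appends "0\r\n"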
+func newChunkedWriter(w io.Writer) io.WriteCloser { + return &chunkedWriter{w} +} + +// Writing to chunkedWriter translates to writing in HTTP chunked Transfer +// Encoding wire format to the underlying Wire chunkedWriter. +type chunkedWriter struct { + Wire io.Writer +} + +// Write the contents of data as one chunk to Wire. +// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has +// a bug since it does not check for success of io.WriteString +func (cw *chunkedWriter) Write(data []byte) (n int, err error) { + + // Don't send 0-length data. It looks like EOF for chunked encoding. + if len(data) == 0 { + return 0, nil + } + + head := strconv.FormatInt(int64(len(data)), 16) + "\r\n" + + if _, err = io.WriteString(cw.Wire, head); err != nil { + return 0, err + } + if n, err = cw.Wire.Write(data); err != nil { + return + } + if n != len(data) { + err = io.ErrShortWrite + return + } + _, err = io.WriteString(cw.Wire, "\r\n") + + return +} + +func (cw *chunkedWriter) Close() error { + _, err := io.WriteString(cw.Wire, "0\r\n") + return err +} diff --git a/vendor/github.com/elazarl/goproxy/counterecryptor.go b/vendor/github.com/elazarl/goproxy/counterecryptor.go new file mode 100644 index 000000000000..494e7a4fedfe --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/counterecryptor.go @@ -0,0 +1,68 @@ +package goproxy + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "errors" +) + +type CounterEncryptorRand struct { + cipher cipher.Block + counter []byte + rand []byte + ix int +} + +func NewCounterEncryptorRandFromKey(key interface{}, seed []byte) (r CounterEncryptorRand, err error) { + var keyBytes []byte + switch key := key.(type) { + case *rsa.PrivateKey: + keyBytes = x509.MarshalPKCS1PrivateKey(key) + default: + err = errors.New("only RSA keys supported") + return + } + h := sha256.New() + if r.cipher, err = aes.NewCipher(h.Sum(keyBytes)[:aes.BlockSize]); err != nil { + return + } + r.counter = make([]byte, r.cipher.BlockSize()) + if seed != nil { + copy(r.counter, h.Sum(seed)[:r.cipher.BlockSize()]) + } + r.rand = make([]byte, r.cipher.BlockSize()) + r.ix = len(r.rand) + return +} + +func (c *CounterEncryptorRand) Seed(b []byte) { + if len(b) != len(c.counter) { + panic("SetCounter: wrong counter size") + } + copy(c.counter, b) +} + +func (c *CounterEncryptorRand) refill() { + c.cipher.Encrypt(c.rand, c.counter) + for i := 0; i < len(c.counter); i++ { + if c.counter[i]++; c.counter[i] != 0 { + break + } + } + c.ix = 0 +} + +func (c *CounterEncryptorRand) Read(b []byte) (n int, err error) { + if c.ix == len(c.rand) { + c.refill() + } + if n = len(c.rand) - c.ix; n > len(b) { + n = len(b) + } + copy(b, c.rand[c.ix:c.ix+n]) + c.ix += n + return +} diff --git a/vendor/github.com/elazarl/goproxy/ctx.go b/vendor/github.com/elazarl/goproxy/ctx.go new file mode 100644 index 000000000000..a90b70dc9035 --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/ctx.go @@ -0,0 +1,87 @@ +package goproxy + +import ( + "net/http" + "regexp" +) + +// ProxyCtx is the Proxy context, contains useful information about every request. It is passed to +// every user function. Also used as a logger. +type ProxyCtx struct { + // Will contain the client request from the proxy + Req *http.Request + // Will contain the remote server's response (if available. 
nil if the request wasn't send yet) + Resp *http.Response + RoundTripper RoundTripper + // will contain the recent error that occured while trying to send receive or parse traffic + Error error + // A handle for the user to keep data in the context, from the call of ReqHandler to the + // call of RespHandler + UserData interface{} + // Will connect a request to a response + Session int64 + proxy *ProxyHttpServer +} + +type RoundTripper interface { + RoundTrip(req *http.Request, ctx *ProxyCtx) (*http.Response, error) +} + +type RoundTripperFunc func(req *http.Request, ctx *ProxyCtx) (*http.Response, error) + +func (f RoundTripperFunc) RoundTrip(req *http.Request, ctx *ProxyCtx) (*http.Response, error) { + return f(req, ctx) +} + +func (ctx *ProxyCtx) RoundTrip(req *http.Request) (*http.Response, error) { + if ctx.RoundTripper != nil { + return ctx.RoundTripper.RoundTrip(req, ctx) + } + return ctx.proxy.Tr.RoundTrip(req) +} + +func (ctx *ProxyCtx) printf(msg string, argv ...interface{}) { + ctx.proxy.Logger.Printf("[%03d] "+msg+"\n", append([]interface{}{ctx.Session & 0xFF}, argv...)...) +} + +// Logf prints a message to the proxy's log. Should be used in a ProxyHttpServer's filter +// This message will be printed only if the Verbose field of the ProxyHttpServer is set to true +// +// proxy.OnRequest().DoFunc(func(r *http.Request,ctx *goproxy.ProxyCtx) *http.Request{ +// nr := atomic.AddInt32(&counter,1) +// ctx.Printf("So far %d requests",nr) +// return r +// }) +func (ctx *ProxyCtx) Logf(msg string, argv ...interface{}) { + if ctx.proxy.Verbose { + ctx.printf("INFO: "+msg, argv...) + } +} + +// Warnf prints a message to the proxy's log. Should be used in a ProxyHttpServer's filter +// This message will always be printed. +// +// proxy.OnRequest().DoFunc(func(r *http.Request,ctx *goproxy.ProxyCtx) *http.Request{ +// f,err := os.OpenFile(cachedContent) +// if err != nil { +// ctx.Warnf("error open file %v: %v",cachedContent,err) +// return r +// } +// return r +// }) +func (ctx *ProxyCtx) Warnf(msg string, argv ...interface{}) { + ctx.printf("WARN: "+msg, argv...) +} + +var charsetFinder = regexp.MustCompile("charset=([^ ;]*)") + +// Will try to infer the character set of the request from the headers. +// Returns the empty string if we don't know which character set it used. +// Currently it will look for charset= in the Content-Type header of the request. +func (ctx *ProxyCtx) Charset() string { + charsets := charsetFinder.FindStringSubmatch(ctx.Resp.Header.Get("Content-Type")) + if charsets == nil { + return "" + } + return charsets[1] +} diff --git a/vendor/github.com/elazarl/goproxy/dispatcher.go b/vendor/github.com/elazarl/goproxy/dispatcher.go new file mode 100644 index 000000000000..831853a01fff --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/dispatcher.go @@ -0,0 +1,320 @@ +package goproxy + +import ( + "bytes" + "io/ioutil" + "net" + "net/http" + "regexp" + "strings" +) + +// ReqCondition.HandleReq will decide whether or not to use the ReqHandler on an HTTP request +// before sending it to the remote server +type ReqCondition interface { + RespCondition + HandleReq(req *http.Request, ctx *ProxyCtx) bool +} + +// RespCondition.HandleReq will decide whether or not to use the RespHandler on an HTTP response +// before sending it to the proxy client. Note that resp might be nil, in case there was an +// error sending the request. 
+type RespCondition interface { + HandleResp(resp *http.Response, ctx *ProxyCtx) bool +} + +// ReqConditionFunc.HandleReq(req,ctx) <=> ReqConditionFunc(req,ctx) +type ReqConditionFunc func(req *http.Request, ctx *ProxyCtx) bool + +// RespConditionFunc.HandleResp(resp,ctx) <=> RespConditionFunc(resp,ctx) +type RespConditionFunc func(resp *http.Response, ctx *ProxyCtx) bool + +func (c ReqConditionFunc) HandleReq(req *http.Request, ctx *ProxyCtx) bool { + return c(req, ctx) +} + +// ReqConditionFunc cannot test responses. It only satisfies RespCondition interface so that +// to be usable as RespCondition. +func (c ReqConditionFunc) HandleResp(resp *http.Response, ctx *ProxyCtx) bool { + return c(ctx.Req, ctx) +} + +func (c RespConditionFunc) HandleResp(resp *http.Response, ctx *ProxyCtx) bool { + return c(resp, ctx) +} + +// UrlHasPrefix returns a ReqCondtion checking wether the destination URL the proxy client has requested +// has the given prefix, with or without the host. +// For example UrlHasPrefix("host/x") will match requests of the form 'GET host/x', and will match +// requests to url 'http://host/x' +func UrlHasPrefix(prefix string) ReqConditionFunc { + return func(req *http.Request, ctx *ProxyCtx) bool { + return strings.HasPrefix(req.URL.Path, prefix) || + strings.HasPrefix(req.URL.Host+"/"+req.URL.Path, prefix) || + strings.HasPrefix(req.URL.Scheme+req.URL.Host+req.URL.Path, prefix) + } +} + +// UrlIs returns a ReqCondition, testing whether or not the request URL is one of the given strings +// with or without the host prefix. +// UrlIs("google.com/","foo") will match requests 'GET /' to 'google.com', requests `'GET google.com/' to +// any host, and requests of the form 'GET foo'. +func UrlIs(urls ...string) ReqConditionFunc { + urlSet := make(map[string]bool) + for _, u := range urls { + urlSet[u] = true + } + return func(req *http.Request, ctx *ProxyCtx) bool { + _, pathOk := urlSet[req.URL.Path] + _, hostAndOk := urlSet[req.URL.Host+req.URL.Path] + return pathOk || hostAndOk + } +} + +// ReqHostMatches returns a ReqCondition, testing whether the host to which the request was directed to matches +// any of the given regular expressions. 
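+// For illustration (an editor's sketch; the pattern is hypothetical), a
+// caller might restrict a handler to hosts under one domain like so:
+//
+//	internalOnly := goproxy.ReqHostMatches(regexp.MustCompile(`\.corp\.example\.com$`))
+//	proxy.OnRequest(internalOnly).DoFunc(...)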
+func ReqHostMatches(regexps ...*regexp.Regexp) ReqConditionFunc { + return func(req *http.Request, ctx *ProxyCtx) bool { + for _, re := range regexps { + if re.MatchString(req.Host) { + return true + } + } + return false + } +} + +// ReqHostIs returns a ReqCondition, testing whether the host to which the request is directed to equal +// to one of the given strings +func ReqHostIs(hosts ...string) ReqConditionFunc { + hostSet := make(map[string]bool) + for _, h := range hosts { + hostSet[h] = true + } + return func(req *http.Request, ctx *ProxyCtx) bool { + _, ok := hostSet[req.URL.Host] + return ok + } +} + +var localHostIpv4 = regexp.MustCompile(`127\.0\.0\.\d+`) + +// IsLocalHost checks whether the destination host is explicitly local host +// (buggy, there can be IPv6 addresses it doesn't catch) +var IsLocalHost ReqConditionFunc = func(req *http.Request, ctx *ProxyCtx) bool { + return req.URL.Host == "::1" || + req.URL.Host == "0:0:0:0:0:0:0:1" || + localHostIpv4.MatchString(req.URL.Host) || + req.URL.Host == "localhost" +} + +// UrlMatches returns a ReqCondition testing whether the destination URL +// of the request matches the given regexp, with or without prefix +func UrlMatches(re *regexp.Regexp) ReqConditionFunc { + return func(req *http.Request, ctx *ProxyCtx) bool { + return re.MatchString(req.URL.Path) || + re.MatchString(req.URL.Host+req.URL.Path) + } +} + +// DstHostIs returns a ReqCondtion testing wether the host in the request url is the given string +func DstHostIs(host string) ReqConditionFunc { + return func(req *http.Request, ctx *ProxyCtx) bool { + return req.URL.Host == host + } +} + +// SrcIpIs returns a ReqCondtion testing wether the source IP of the request is the given string +func SrcIpIs(ip string) ReqCondition { + return ReqConditionFunc(func(req *http.Request, ctx *ProxyCtx) bool { + return strings.HasPrefix(req.RemoteAddr, ip+":") + }) +} + +// Not returns a ReqCondtion negating the given ReqCondition +func Not(r ReqCondition) ReqConditionFunc { + return func(req *http.Request, ctx *ProxyCtx) bool { + return !r.HandleReq(req, ctx) + } +} + +// ContentTypeIs returns a RespCondition testing whether the HTTP response has Content-Type header equal +// to one of the given strings. +func ContentTypeIs(typ string, types ...string) RespCondition { + types = append(types, typ) + return RespConditionFunc(func(resp *http.Response, ctx *ProxyCtx) bool { + if resp == nil { + return false + } + contentType := resp.Header.Get("Content-Type") + for _, typ := range types { + if contentType == typ || strings.HasPrefix(contentType, typ+";") { + return true + } + } + return false + }) +} + +// ProxyHttpServer.OnRequest Will return a temporary ReqProxyConds struct, aggregating the given condtions. +// You will use the ReqProxyConds struct to register a ReqHandler, that would filter +// the request, only if all the given ReqCondition matched. +// Typical usage: +// proxy.OnRequest(UrlIs("example.com/foo"),UrlMatches(regexp.MustParse(`.*\.exampl.\com\./.*`)).Do(...) +func (proxy *ProxyHttpServer) OnRequest(conds ...ReqCondition) *ReqProxyConds { + return &ReqProxyConds{proxy, conds} +} + +// ReqProxyConds aggregate ReqConditions for a ProxyHttpServer. Upon calling Do, it will register a ReqHandler that would +// handle the request if all conditions on the HTTP request are met. 
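+// For illustration (an editor's sketch; the host and message are hypothetical),
+// built-in and custom conditions compose directly in OnRequest:
+//
+//	proxy.OnRequest(goproxy.DstHostIs("uploads.example.com"),
+//		goproxy.ReqConditionFunc(func(req *http.Request, ctx *goproxy.ProxyCtx) bool {
+//			return req.Method == "POST"
+//		})).DoFunc(
+//		func(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
+//			return req, goproxy.NewResponse(req, goproxy.ContentTypeText,
+//				http.StatusForbidden, "uploads are disabled")
+//		})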
+type ReqProxyConds struct { + proxy *ProxyHttpServer + reqConds []ReqCondition +} + +// DoFunc is equivalent to proxy.OnRequest().Do(FuncReqHandler(f)) +func (pcond *ReqProxyConds) DoFunc(f func(req *http.Request, ctx *ProxyCtx) (*http.Request, *http.Response)) { + pcond.Do(FuncReqHandler(f)) +} + +// ReqProxyConds.Do will register the ReqHandler on the proxy, +// the ReqHandler will handle the HTTP request if all the conditions +// aggregated in the ReqProxyConds are met. Typical usage: +// proxy.OnRequest().Do(handler) // will call handler.Handle(req,ctx) on every request to the proxy +// proxy.OnRequest(cond1,cond2).Do(handler) +// // given request to the proxy, will test if cond1.HandleReq(req,ctx) && cond2.HandleReq(req,ctx) are true +// // if they are, will call handler.Handle(req,ctx) +func (pcond *ReqProxyConds) Do(h ReqHandler) { + pcond.proxy.reqHandlers = append(pcond.proxy.reqHandlers, + FuncReqHandler(func(r *http.Request, ctx *ProxyCtx) (*http.Request, *http.Response) { + for _, cond := range pcond.reqConds { + if !cond.HandleReq(r, ctx) { + return r, nil + } + } + return h.Handle(r, ctx) + })) +} + +// HandleConnect is used when proxy receives an HTTP CONNECT request, +// it'll then use the HttpsHandler to determine what should it +// do with this request. The handler returns a ConnectAction struct, the Action field in the ConnectAction +// struct returned will determine what to do with this request. ConnectAccept will simply accept the request +// forwarding all bytes from the client to the remote host, ConnectReject will close the connection with the +// client, and ConnectMitm, will assume the underlying connection is an HTTPS connection, and will use Man +// in the Middle attack to eavesdrop the connection. All regular handler will be active on this eavesdropped +// connection. +// The ConnectAction struct contains possible tlsConfig that will be used for eavesdropping. If nil, the proxy +// will use the default tls configuration. 
+// proxy.OnRequest().HandleConnect(goproxy.AlwaysReject) // rejects all CONNECT requests +func (pcond *ReqProxyConds) HandleConnect(h HttpsHandler) { + pcond.proxy.httpsHandlers = append(pcond.proxy.httpsHandlers, + FuncHttpsHandler(func(host string, ctx *ProxyCtx) (*ConnectAction, string) { + for _, cond := range pcond.reqConds { + if !cond.HandleReq(ctx.Req, ctx) { + return nil, "" + } + } + return h.HandleConnect(host, ctx) + })) +} + +// HandleConnectFunc is equivalent to HandleConnect, +// for example, accepting CONNECT request if they contain a password in header +// io.WriteString(h,password) +// passHash := h.Sum(nil) +// proxy.OnRequest().HandleConnectFunc(func(host string, ctx *ProxyCtx) (*ConnectAction, string) { +// c := sha1.New() +// io.WriteString(c,ctx.Req.Header.Get("X-GoProxy-Auth")) +// if c.Sum(nil) == passHash { +// return OkConnect, host +// } +// return RejectConnect, host +// }) +func (pcond *ReqProxyConds) HandleConnectFunc(f func(host string, ctx *ProxyCtx) (*ConnectAction, string)) { + pcond.HandleConnect(FuncHttpsHandler(f)) +} + +func (pcond *ReqProxyConds) HijackConnect(f func(req *http.Request, client net.Conn, ctx *ProxyCtx)) { + pcond.proxy.httpsHandlers = append(pcond.proxy.httpsHandlers, + FuncHttpsHandler(func(host string, ctx *ProxyCtx) (*ConnectAction, string) { + for _, cond := range pcond.reqConds { + if !cond.HandleReq(ctx.Req, ctx) { + return nil, "" + } + } + return &ConnectAction{Action: ConnectHijack, Hijack: f}, host + })) +} + +// ProxyConds is used to aggregate RespConditions for a ProxyHttpServer. +// Upon calling ProxyConds.Do, it will register a RespHandler that would +// handle the HTTP response from remote server if all conditions on the HTTP response are met. +type ProxyConds struct { + proxy *ProxyHttpServer + reqConds []ReqCondition + respCond []RespCondition +} + +// ProxyConds.DoFunc is equivalent to proxy.OnResponse().Do(FuncRespHandler(f)) +func (pcond *ProxyConds) DoFunc(f func(resp *http.Response, ctx *ProxyCtx) *http.Response) { + pcond.Do(FuncRespHandler(f)) +} + +// ProxyConds.Do will register the RespHandler on the proxy, h.Handle(resp,ctx) will be called on every +// request that matches the conditions aggregated in pcond. 
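+// For illustration (an editor's sketch): combined with the ContentTypeIs
+// condition above, a response filter that only fires for HTML might read:
+//
+//	proxy.OnResponse(goproxy.ContentTypeIs("text/html")).DoFunc(
+//		func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
+//			ctx.Logf("HTML response from %s", ctx.Req.Host)
+//			return resp
+//		})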
+func (pcond *ProxyConds) Do(h RespHandler) { + pcond.proxy.respHandlers = append(pcond.proxy.respHandlers, + FuncRespHandler(func(resp *http.Response, ctx *ProxyCtx) *http.Response { + for _, cond := range pcond.reqConds { + if !cond.HandleReq(ctx.Req, ctx) { + return resp + } + } + for _, cond := range pcond.respCond { + if !cond.HandleResp(resp, ctx) { + return resp + } + } + return h.Handle(resp, ctx) + })) +} + +// OnResponse is used when adding a response-filter to the HTTP proxy, usual pattern is +// proxy.OnResponse(cond1,cond2).Do(handler) // handler.Handle(resp,ctx) will be used +// // if cond1.HandleResp(resp) && cond2.HandleResp(resp) +func (proxy *ProxyHttpServer) OnResponse(conds ...RespCondition) *ProxyConds { + return &ProxyConds{proxy, make([]ReqCondition, 0), conds} +} + +// AlwaysMitm is a HttpsHandler that always eavesdrop https connections, for example to +// eavesdrop all https connections to www.google.com, we can use +// proxy.OnRequest(goproxy.ReqHostIs("www.google.com")).HandleConnect(goproxy.AlwaysMitm) +var AlwaysMitm FuncHttpsHandler = func(host string, ctx *ProxyCtx) (*ConnectAction, string) { + return MitmConnect, host +} + +// AlwaysReject is a HttpsHandler that drops any CONNECT request, for example, this code will disallow +// connections to hosts on any other port than 443 +// proxy.OnRequest(goproxy.Not(goproxy.ReqHostMatches(regexp.MustCompile(":443$"))). +// HandleConnect(goproxy.AlwaysReject) +var AlwaysReject FuncHttpsHandler = func(host string, ctx *ProxyCtx) (*ConnectAction, string) { + return RejectConnect, host +} + +// HandleBytes will return a RespHandler that read the entire body of the request +// to a byte array in memory, would run the user supplied f function on the byte arra, +// and will replace the body of the original response with the resulting byte array. +func HandleBytes(f func(b []byte, ctx *ProxyCtx) []byte) RespHandler { + return FuncRespHandler(func(resp *http.Response, ctx *ProxyCtx) *http.Response { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + ctx.Warnf("Cannot read response %s", err) + return resp + } + resp.Body.Close() + + resp.Body = ioutil.NopCloser(bytes.NewBuffer(f(b, ctx))) + return resp + }) +} diff --git a/vendor/github.com/elazarl/goproxy/doc.go b/vendor/github.com/elazarl/goproxy/doc.go new file mode 100644 index 000000000000..cde21643c235 --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/doc.go @@ -0,0 +1,100 @@ +/* +Package goproxy provides a customizable HTTP proxy, +supporting hijacking HTTPS connection. + +The intent of the proxy, is to be usable with reasonable amount of traffic +yet, customizable and programable. + +The proxy itself is simply an `net/http` handler. + +Typical usage is + + proxy := goproxy.NewProxyHttpServer() + proxy.OnRequest(..conditions..).Do(..requesthandler..) + proxy.OnRequest(..conditions..).DoFunc(..requesthandlerFunction..) + proxy.OnResponse(..conditions..).Do(..responesHandler..) + proxy.OnResponse(..conditions..).DoFunc(..responesHandlerFunction..) 
+	http.ListenAndServe(":8080", proxy)
+
+Adding a header to each request:
+
+	proxy.OnRequest().DoFunc(func(r *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
+		r.Header.Set("X-GoProxy", "1")
+		return r, nil
+	})
+
+Note that the function is called before the proxy sends the request to the server.
+
+To print the content type of all incoming responses:
+
+	proxy.OnResponse().DoFunc(func(r *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
+		println(ctx.Req.Host, "->", r.Header.Get("Content-Type"))
+		return r
+	})
+
+Note that we used the ProxyCtx context variable here. It contains the request
+and the response (Req and Resp; Resp is nil if unavailable) of this specific
+client interaction with the proxy.
+
+To print the content type of all responses from a certain URL, we'll add a
+ReqCondition to the OnResponse function:
+
+	proxy.OnResponse(goproxy.UrlIs("golang.org/pkg")).DoFunc(func(r *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
+		println(ctx.Req.Host, "->", r.Header.Get("Content-Type"))
+		return r
+	})
+
+We can also write conditions ourselves; conditions can be set on requests and on responses:
+
+	var random = ReqConditionFunc(func(req *http.Request, ctx *goproxy.ProxyCtx) bool {
+		return rand.Intn(2) == 0
+	})
+	var hasGoProxyHeader = RespConditionFunc(func(resp *http.Response, ctx *goproxy.ProxyCtx) bool {
+		return resp.Header.Get("X-GoProxy") != ""
+	})
+
+Caution! If you give a RespCondition to the OnRequest function, you'll get a
+run-time panic! It doesn't make sense to read the response if you haven't
+received it yet.
+
+Finally, we have a convenience function to return a quick response:
+
+	proxy.OnResponse(hasGoProxyHeader).DoFunc(func(r *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
+		r.Body.Close()
+		return goproxy.ForbiddenTextResponse(ctx.Req, "Can't see response with X-GoProxy header!")
+	})
+
+We close the body of the original response and return a new 403 response with a short message.
+
+Example use cases:
+
+1. https://github.com/elazarl/goproxy/tree/master/examples/avgSize
+
+To measure the average size of the HTML served by your site. One can ask the
+whole QA team to access the website through the proxy, and the proxy will
+measure the average size of all text/html responses from your host.
+
+2. [not yet implemented]
+
+All requests to your web servers are directed through the proxy; when the
+proxy detects HTML fragments sent in response to an AJAX request, it sends
+a warning email.
+
+3. https://github.com/elazarl/goproxy/blob/master/examples/httpdump/
+
+Generate real traffic to your website from real users browsing through the
+proxy. Record the traffic, and replay it for more realistic load testing.
+
+4. https://github.com/elazarl/goproxy/tree/master/examples/noRedditAtWorktime
+
+Will allow browsing to reddit.com between 8:00am and 5:00pm.
+
+5. https://github.com/elazarl/goproxy/tree/master/examples/jqueryVersion
+
+Will warn if multiple versions of jQuery are used in the same domain.
+
+6. https://github.com/elazarl/goproxy/blob/master/examples/upside-down-ternet/
+
+Modifies image files in an HTTP response via goproxy's image extension found in ext/.
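+
+Putting the pieces together, a complete minimal program might look like this
+(a sketch; the handler body is illustrative):
+
+	package main
+
+	import (
+		"log"
+		"net/http"
+
+		"github.com/elazarl/goproxy"
+	)
+
+	func main() {
+		proxy := goproxy.NewProxyHttpServer()
+		proxy.Verbose = true
+		proxy.OnRequest().DoFunc(func(r *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
+			r.Header.Set("X-GoProxy", "1")
+			return r, nil
+		})
+		log.Fatal(http.ListenAndServe(":8080", proxy))
+	}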
+ +*/ +package goproxy diff --git a/vendor/github.com/elazarl/goproxy/https.go b/vendor/github.com/elazarl/goproxy/https.go new file mode 100644 index 000000000000..d8d627802ba3 --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/https.go @@ -0,0 +1,352 @@ +package goproxy + +import ( + "bufio" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "sync/atomic" +) + +type ConnectActionLiteral int + +const ( + ConnectAccept = iota + ConnectReject + ConnectMitm + ConnectHijack + ConnectHTTPMitm +) + +var ( + OkConnect = &ConnectAction{Action: ConnectAccept} + MitmConnect = &ConnectAction{Action: ConnectMitm} + HTTPMitmConnect = &ConnectAction{Action: ConnectHTTPMitm} + RejectConnect = &ConnectAction{Action: ConnectReject} +) + +type ConnectAction struct { + Action ConnectActionLiteral + TlsConfig *tls.Config + Ca *tls.Certificate + Hijack func(req *http.Request, client net.Conn, ctx *ProxyCtx) +} + +func stripPort(s string) string { + ix := strings.IndexRune(s, ':') + if ix == -1 { + return s + } + return s[:ix] +} + +func (proxy *ProxyHttpServer) dial(network, addr string) (c net.Conn, err error) { + if proxy.Tr.Dial != nil { + return proxy.Tr.Dial(network, addr) + } + return net.Dial(network, addr) +} + +func (proxy *ProxyHttpServer) connectDial(network, addr string) (c net.Conn, err error) { + if proxy.ConnectDial == nil { + return proxy.dial(network, addr) + } + return proxy.ConnectDial(network, addr) +} + +func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request) { + ctx := &ProxyCtx{Req: r, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy} + + hij, ok := w.(http.Hijacker) + if !ok { + panic("httpserver does not support hijacking") + } + + proxyClient, _, e := hij.Hijack() + if e != nil { + panic("Cannot hijack connection " + e.Error()) + } + + ctx.Logf("Running %d CONNECT handlers", len(proxy.httpsHandlers)) + todo, host := OkConnect, r.URL.Host + ctx.Req = r + for _, h := range proxy.httpsHandlers { + newtodo, newhost := h.HandleConnect(host, ctx) + if newtodo != nil { + todo, host = newtodo, newhost + } + ctx.Logf("handler: %v %s", todo, host) + } + switch todo.Action { + case ConnectAccept: + if !hasPort.MatchString(host) { + host += ":80" + } + targetSiteCon, err := proxy.connectDial("tcp", host) + if err != nil { + httpError(proxyClient, ctx, err) + return + } + ctx.Logf("Accepting CONNECT to %s", host) + proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) + go copyAndClose(ctx, targetSiteCon, proxyClient) + go copyAndClose(ctx, proxyClient, targetSiteCon) + case ConnectHijack: + ctx.Logf("Hijacking CONNECT to %s", host) + proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) + todo.Hijack(r, proxyClient, ctx) + case ConnectHTTPMitm: + proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) + ctx.Logf("Assuming CONNECT is plain HTTP tunneling, mitm proxying it") + targetSiteCon, err := proxy.connectDial("tcp", host) + if err != nil { + ctx.Warnf("Error dialing to %s: %s", host, err.Error()) + return + } + for { + client := bufio.NewReader(proxyClient) + remote := bufio.NewReader(targetSiteCon) + req, err := http.ReadRequest(client) + if err != nil && err != io.EOF { + ctx.Warnf("cannot read request of MITM HTTP client: %+#v", err) + } + if err != nil { + return + } + req, resp := proxy.filterRequest(req, ctx) + if resp == nil { + if err := req.Write(targetSiteCon); err != nil { + httpError(proxyClient, ctx, err) + return + } + resp, err = http.ReadResponse(remote, req) + 
if err != nil { + httpError(proxyClient, ctx, err) + return + } + } + resp = proxy.filterResponse(resp, ctx) + if err := resp.Write(proxyClient); err != nil { + httpError(proxyClient, ctx, err) + return + } + } + case ConnectMitm: + proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n")) + ctx.Logf("Assuming CONNECT is TLS, mitm proxying it") + // this goes in a separate goroutine, so that the net/http server won't think we're + // still handling the request even after hijacking the connection. Those HTTP CONNECT + // request can take forever, and the server will be stuck when "closed". + // TODO: Allow Server.Close() mechanism to shut down this connection as nicely as possible + ca := todo.Ca + if ca == nil { + ca = &GoproxyCa + } + cert, err := signHost(*ca, []string{stripPort(host)}) + if err != nil { + ctx.Warnf("Cannot sign host certificate with provided CA: %s", err) + return + } + tlsConfig := tls.Config{} + if todo.TlsConfig != nil { + tlsConfig = *todo.TlsConfig + } else { + tlsConfig = *defaultTlsConfig + } + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + go func() { + //TODO: cache connections to the remote website + rawClientTls := tls.Server(proxyClient, &tlsConfig) + if err := rawClientTls.Handshake(); err != nil { + ctx.Warnf("Cannot handshake client %v %v", r.Host, err) + return + } + defer rawClientTls.Close() + clientTlsReader := bufio.NewReader(rawClientTls) + for !isEof(clientTlsReader) { + req, err := http.ReadRequest(clientTlsReader) + if err != nil && err != io.EOF { + return + } + if err != nil { + ctx.Warnf("Cannot read TLS request from mitm'd client %v %v", r.Host, err) + return + } + ctx.Logf("req %v", r.Host) + req.URL, err = url.Parse("https://" + r.Host + req.URL.String()) + req, resp := proxy.filterRequest(req, ctx) + if resp == nil { + if err != nil { + ctx.Warnf("Illegal URL %s", "https://"+r.Host+req.URL.Path) + return + } + removeProxyHeaders(ctx, req) + resp, err = ctx.RoundTrip(req) + if err != nil { + ctx.Warnf("Cannot read TLS response from mitm'd server %v", err) + return + } + ctx.Logf("resp %v", resp.Status) + } + resp = proxy.filterResponse(resp, ctx) + text := resp.Status + statusCode := strconv.Itoa(resp.StatusCode) + " " + if strings.HasPrefix(text, statusCode) { + text = text[len(statusCode):] + } + // always use 1.1 to support chunked encoding + if _, err := io.WriteString(rawClientTls, "HTTP/1.1"+" "+statusCode+text+"\r\n"); err != nil { + ctx.Warnf("Cannot write TLS response HTTP status from mitm'd client: %v", err) + return + } + // Since we don't know the length of resp, return chunked encoded response + // TODO: use a more reasonable scheme + resp.Header.Del("Content-Length") + resp.Header.Set("Transfer-Encoding", "chunked") + if err := resp.Header.Write(rawClientTls); err != nil { + ctx.Warnf("Cannot write TLS response header from mitm'd client: %v", err) + return + } + if _, err = io.WriteString(rawClientTls, "\r\n"); err != nil { + ctx.Warnf("Cannot write TLS response header end from mitm'd client: %v", err) + return + } + chunked := newChunkedWriter(rawClientTls) + if _, err := io.Copy(chunked, resp.Body); err != nil { + ctx.Warnf("Cannot write TLS response body from mitm'd client: %v", err) + return + } + if err := chunked.Close(); err != nil { + ctx.Warnf("Cannot write TLS chunked EOF from mitm'd client: %v", err) + return + } + if _, err = io.WriteString(rawClientTls, "\r\n"); err != nil { + ctx.Warnf("Cannot write TLS response chunked trailer from mitm'd client: %v", err) + return + } + } + ctx.Logf("Exiting on 
EOF") + }() + case ConnectReject: + if ctx.Resp != nil { + if err := ctx.Resp.Write(proxyClient); err != nil { + ctx.Warnf("Cannot write response that reject http CONNECT: %v", err) + } + } + proxyClient.Close() + } +} + +func httpError(w io.WriteCloser, ctx *ProxyCtx, err error) { + if _, err := io.WriteString(w, "HTTP/1.1 502 Bad Gateway\r\n\r\n"); err != nil { + ctx.Warnf("Error responding to client: %s", err) + } + if err := w.Close(); err != nil { + ctx.Warnf("Error closing client connection: %s", err) + } +} + +func copyAndClose(ctx *ProxyCtx, w, r net.Conn) { + connOk := true + if _, err := io.Copy(w, r); err != nil { + connOk = false + ctx.Warnf("Error copying to client: %s", err) + } + if err := r.Close(); err != nil && connOk { + ctx.Warnf("Error closing: %s", err) + } +} + +func dialerFromEnv(proxy *ProxyHttpServer) func(network, addr string) (net.Conn, error) { + https_proxy := os.Getenv("HTTPS_PROXY") + if https_proxy == "" { + https_proxy = os.Getenv("https_proxy") + } + if https_proxy == "" { + return nil + } + return proxy.NewConnectDialToProxy(https_proxy) +} + +func (proxy *ProxyHttpServer) NewConnectDialToProxy(https_proxy string) func(network, addr string) (net.Conn, error) { + u, err := url.Parse(https_proxy) + if err != nil { + return nil + } + if u.Scheme == "" || u.Scheme == "http" { + if strings.IndexRune(u.Host, ':') == -1 { + u.Host += ":80" + } + return func(network, addr string) (net.Conn, error) { + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: make(http.Header), + } + c, err := proxy.dial(network, u.Host) + if err != nil { + return nil, err + } + connectReq.Write(c) + // Read response. + // Okay to use and discard buffered reader here, because + // TLS server will not speak until spoken to. + br := bufio.NewReader(c) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + c.Close() + return nil, err + } + if resp.StatusCode != 200 { + resp, _ := ioutil.ReadAll(resp.Body) + c.Close() + return nil, errors.New("proxy refused connection" + string(resp)) + } + return c, nil + } + } + if u.Scheme == "https" { + if strings.IndexRune(u.Host, ':') == -1 { + u.Host += ":443" + } + return func(network, addr string) (net.Conn, error) { + c, err := proxy.dial(network, u.Host) + if err != nil { + return nil, err + } + c = tls.Client(c, proxy.Tr.TLSClientConfig) + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: make(http.Header), + } + connectReq.Write(c) + // Read response. + // Okay to use and discard buffered reader here, because + // TLS server will not speak until spoken to. 
+ br := bufio.NewReader(c) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + c.Close() + return nil, err + } + if resp.StatusCode != 200 { + body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 500)) + resp.Body.Close() + c.Close() + return nil, errors.New("proxy refused connection" + string(body)) + } + return c, nil + } + } + return nil +} diff --git a/vendor/github.com/elazarl/goproxy/key.pem b/vendor/github.com/elazarl/goproxy/key.pem new file mode 100644 index 000000000000..2438b376032a --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/key.pem @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQC/P0FsJomPGzvdO9yreV4/faEAZ6tDVGC+VnrxnidmahUd+X7Y +2v+bR2Zb4Z05+lNyz8rN8mNgav/zjHnbh+K5HwZ1nQc61cnPIXmx6hadsEi7KvU9 +sSmBGEZAyqo5S6NgTF4tt80c8ignxdnVXPK/djGNuaNYD5L+4570da0NswIDAQAB +AoGBALzIv1b4D7ARTR3NOr6V9wArjiOtMjUrdLhO+9vIp9IEA8ZsA9gjDlCEwbkP +VDnoLjnWfraff5Os6+3JjHy1fYpUiCdnk2XA6iJSL1XWKQZPt3wOunxP4lalDgED +QTRReFbA/y/Z4kSfTXpVj68ytcvSRW/N7q5/qRtbN9804jpBAkEA0s6lvH2btSLA +mcEdwhs7zAslLbdld7rvfUeP82gPPk0S6yUqTNyikqshM9AwAktHY7WvYdKl+ghZ +HTxKVC4DoQJBAOg/IAW5RbXknP+Lf7AVtBgw3E+Yfa3mcdLySe8hjxxyZq825Zmu +Rt5Qj4Lw6ifSFNy4kiiSpE/ZCukYvUXGENMCQFkPxSWlS6tzSzuqQxBGwTSrYMG3 +wb6b06JyIXcMd6Qym9OMmBpw/J5KfnSNeDr/4uFVWQtTG5xO+pdHaX+3EQECQQDl +qcbY4iX1gWVfr2tNjajSYz751yoxVbkpiT9joiQLVXYFvpu+JYEfRzsjmWl0h2Lq +AftG8/xYmaEYcMZ6wSrRAkBUwiom98/8wZVlB6qbwhU1EKDFANvICGSWMIhPx3v7 +MJqTIj4uJhte2/uyVvZ6DC6noWYgy+kLgqG0S97tUEG8 +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/elazarl/goproxy/proxy.go b/vendor/github.com/elazarl/goproxy/proxy.go new file mode 100644 index 000000000000..28a76bfdf4c8 --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/proxy.go @@ -0,0 +1,160 @@ +package goproxy + +import ( + "bufio" + "io" + "log" + "net" + "net/http" + "os" + "regexp" + "sync/atomic" +) + +// The basic proxy type. Implements http.Handler. +type ProxyHttpServer struct { + // session variable must be aligned in i386 + // see http://golang.org/src/pkg/sync/atomic/doc.go#L41 + sess int64 + // setting Verbose to true will log information on each request sent to the proxy + Verbose bool + Logger *log.Logger + NonproxyHandler http.Handler + reqHandlers []ReqHandler + respHandlers []RespHandler + httpsHandlers []HttpsHandler + Tr *http.Transport + // ConnectDial will be used to create TCP connections for CONNECT requests + // if nil Tr.Dial will be used + ConnectDial func(network string, addr string) (net.Conn, error) +} + +var hasPort = regexp.MustCompile(`:\d+$`) + +func copyHeaders(dst, src http.Header) { + for k, _ := range dst { + dst.Del(k) + } + for k, vs := range src { + for _, v := range vs { + dst.Add(k, v) + } + } +} + +func isEof(r *bufio.Reader) bool { + _, err := r.Peek(1) + if err == io.EOF { + return true + } + return false +} + +func (proxy *ProxyHttpServer) filterRequest(r *http.Request, ctx *ProxyCtx) (req *http.Request, resp *http.Response) { + req = r + for _, h := range proxy.reqHandlers { + req, resp = h.Handle(r, ctx) + // non-nil resp means the handler decided to skip sending the request + // and return canned response instead. 
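+		// (a canned response still passes through the response filters in
+		// ServeHTTP before being written back to the client)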
+ if resp != nil { + break + } + } + return +} +func (proxy *ProxyHttpServer) filterResponse(respOrig *http.Response, ctx *ProxyCtx) (resp *http.Response) { + resp = respOrig + for _, h := range proxy.respHandlers { + ctx.Resp = resp + resp = h.Handle(resp, ctx) + } + return +} + +func removeProxyHeaders(ctx *ProxyCtx, r *http.Request) { + r.RequestURI = "" // this must be reset when serving a request with the client + ctx.Logf("Sending request %v %v", r.Method, r.URL.String()) + // If no Accept-Encoding header exists, Transport will add the headers it can accept + // and would wrap the response body with the relevant reader. + r.Header.Del("Accept-Encoding") + // curl can add that, see + // http://homepage.ntlworld.com/jonathan.deboynepollard/FGA/web-proxy-connection-header.html + r.Header.Del("Proxy-Connection") + // Connection is single hop Header: + // http://www.w3.org/Protocols/rfc2616/rfc2616.txt + // 14.10 Connection + // The Connection general-header field allows the sender to specify + // options that are desired for that particular connection and MUST NOT + // be communicated by proxies over further connections. + r.Header.Del("Connection") +} + +// Standard net/http function. Shouldn't be used directly, http.Serve will use it. +func (proxy *ProxyHttpServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + //r.Header["X-Forwarded-For"] = w.RemoteAddr() + if r.Method == "CONNECT" { + proxy.handleHttps(w, r) + } else { + ctx := &ProxyCtx{Req: r, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy} + + var err error + ctx.Logf("Got request %v %v %v %v", r.URL.Path, r.Host, r.Method, r.URL.String()) + if !r.URL.IsAbs() { + proxy.NonproxyHandler.ServeHTTP(w, r) + return + } + r, resp := proxy.filterRequest(r, ctx) + + if resp == nil { + removeProxyHeaders(ctx, r) + resp, err = ctx.RoundTrip(r) + if err != nil { + ctx.Error = err + resp = proxy.filterResponse(nil, ctx) + if resp == nil { + ctx.Logf("error read response %v %v:", r.URL.Host, err.Error()) + http.Error(w, err.Error(), 500) + return + } + } + ctx.Logf("Received response %v", resp.Status) + } + origBody := resp.Body + resp = proxy.filterResponse(resp, ctx) + + ctx.Logf("Copying response to client %v [%d]", resp.Status, resp.StatusCode) + // http.ResponseWriter will take care of filling the correct response length + // Setting it now, might impose wrong value, contradicting the actual new + // body the user returned. + // We keep the original body to remove the header only if things changed. + // This will prevent problems with HEAD requests where there's no body, yet, + // the Content-Length header should be set. 
+ if origBody != resp.Body { + resp.Header.Del("Content-Length") + } + copyHeaders(w.Header(), resp.Header) + w.WriteHeader(resp.StatusCode) + nr, err := io.Copy(w, resp.Body) + if err := resp.Body.Close(); err != nil { + ctx.Warnf("Can't close response body %v", err) + } + ctx.Logf("Copied %v bytes to client error=%v", nr, err) + } +} + +// New proxy server, logs to StdErr by default +func NewProxyHttpServer() *ProxyHttpServer { + proxy := ProxyHttpServer{ + Logger: log.New(os.Stderr, "", log.LstdFlags), + reqHandlers: []ReqHandler{}, + respHandlers: []RespHandler{}, + httpsHandlers: []HttpsHandler{}, + NonproxyHandler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + http.Error(w, "This is a proxy server, does not response to non-proxy requests", 500) + }), + Tr: &http.Transport{TLSClientConfig: tlsClientSkipVerify, + Proxy: http.ProxyFromEnvironment}, + } + proxy.ConnectDial = dialerFromEnv(&proxy) + return &proxy +} diff --git a/vendor/github.com/elazarl/goproxy/responses.go b/vendor/github.com/elazarl/goproxy/responses.go new file mode 100644 index 000000000000..b304b8829be9 --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/responses.go @@ -0,0 +1,38 @@ +package goproxy + +import ( + "bytes" + "io/ioutil" + "net/http" +) + +// Will generate a valid http response to the given request the response will have +// the given contentType, and http status. +// Typical usage, refuse to process requests to local addresses: +// +// proxy.OnRequest(IsLocalHost()).DoFunc(func(r *http.Request, ctx *goproxy.ProxyCtx) (*http.Request,*http.Response) { +// return nil,NewResponse(r,goproxy.ContentTypeHtml,http.StatusUnauthorized, +// `Can't use proxy for local addresses`) +// }) +func NewResponse(r *http.Request, contentType string, status int, body string) *http.Response { + resp := &http.Response{} + resp.Request = r + resp.TransferEncoding = r.TransferEncoding + resp.Header = make(http.Header) + resp.Header.Add("Content-Type", contentType) + resp.StatusCode = status + buf := bytes.NewBufferString(body) + resp.ContentLength = int64(buf.Len()) + resp.Body = ioutil.NopCloser(buf) + return resp +} + +const ( + ContentTypeText = "text/plain" + ContentTypeHtml = "text/html" +) + +// Alias for NewResponse(r,ContentTypeText,http.StatusAccepted,text) +func TextResponse(r *http.Request, text string) *http.Response { + return NewResponse(r, ContentTypeText, http.StatusAccepted, text) +} diff --git a/vendor/github.com/elazarl/goproxy/signer.go b/vendor/github.com/elazarl/goproxy/signer.go new file mode 100644 index 000000000000..5ce8e6f5e303 --- /dev/null +++ b/vendor/github.com/elazarl/goproxy/signer.go @@ -0,0 +1,85 @@ +package goproxy + +import ( + "crypto/rsa" + "crypto/sha1" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "net" + "runtime" + "sort" + "time" +) + +func hashSorted(lst []string) []byte { + c := make([]string, len(lst)) + copy(c, lst) + sort.Strings(c) + h := sha1.New() + for _, s := range c { + h.Write([]byte(s + ",")) + } + return h.Sum(nil) +} + +func hashSortedBigInt(lst []string) *big.Int { + rv := new(big.Int) + rv.SetBytes(hashSorted(lst)) + return rv +} + +var goproxySignerVersion = ":goroxy1" + +func signHost(ca tls.Certificate, hosts []string) (cert tls.Certificate, err error) { + var x509ca *x509.Certificate + if x509ca, err = x509.ParseCertificate(GoproxyCa.Certificate[0]); err != nil { + return + } + start := time.Unix(0, 0) + end, err := time.Parse("2006-01-02", "2049-12-31") + if err != nil 
{ + panic(err) + } + hash := hashSorted(append(hosts, goproxySignerVersion, ":"+runtime.Version())) + serial := new(big.Int) + serial.SetBytes(hash) + template := x509.Certificate{ + // TODO(elazar): instead of this ugly hack, just encode the certificate and hash the binary form. + SerialNumber: serial, + Issuer: x509ca.Subject, + Subject: pkix.Name{ + Organization: []string{"GoProxy untrusted MITM proxy Inc"}, + }, + NotBefore: start, + NotAfter: end, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + for _, h := range hosts { + if ip := net.ParseIP(h); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, h) + } + } + var csprng CounterEncryptorRand + if csprng, err = NewCounterEncryptorRandFromKey(ca.PrivateKey, hash); err != nil { + return + } + var certpriv *rsa.PrivateKey + if certpriv, err = rsa.GenerateKey(&csprng, 1024); err != nil { + return + } + var derBytes []byte + if derBytes, err = x509.CreateCertificate(&csprng, &template, x509ca, &certpriv.PublicKey, ca.PrivateKey); err != nil { + return + } + return tls.Certificate{ + Certificate: [][]byte{derBytes, ca.Certificate[0]}, + PrivateKey: certpriv, + }, nil +} diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml new file mode 100644 index 000000000000..2092c72c46bd --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.8 + - 1.7 + +install: + - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - go get github.com/jessevdk/go-flags + +script: + - go get + - go test -cover ./... + +notifications: + email: false diff --git a/vendor/github.com/evanphx/json-patch/BUILD b/vendor/github.com/evanphx/json-patch/BUILD new file mode 100644 index 000000000000..708065241d95 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/BUILD @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "merge.go", + "patch.go", + ], + importpath = "github.com/evanphx/json-patch", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 000000000000..0eb9b72d84dd --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 000000000000..d0d826bacd23 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,29 @@ +## JSON-Patch + +Provides the ability to modify and test a JSON according to a +[RFC6902 JSON patch](http://tools.ietf.org/html/rfc6902) and [RFC7396 JSON Merge Patch](https://tools.ietf.org/html/rfc7396). + +*Version*: **1.0** + +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) + +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) + +### API Usage + +* Given a `[]byte`, obtain a Patch object + + `obj, err := jsonpatch.DecodePatch(patch)` + +* Apply the patch and get a new document back + + `out, err := obj.Apply(doc)` + +* Create a JSON Merge Patch document based on two json documents (a to b): + + `mergeDoc, err := jsonpatch.CreateMergePatch(a, b)` + +* Bonus API: compare documents for structural equality + + `jsonpatch.Equal(doca, docb)` + diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 000000000000..b9af252fec35 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,305 @@ +package jsonpatch + +import ( + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for 
_, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. +func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// CreateMergePatch creates a merge patch as specified in http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +// +// 'a' is original, 'b' is the modified document. Both are to be given as json encoded content. +// The function will return a mergeable json document with differences from a to b. +// +// An error will be returned if any of the two documents are invalid. +func CreateMergePatch(a, b []byte) ([]byte, error) { + aI := map[string]interface{}{} + bI := map[string]interface{}{} + err := json.Unmarshal(a, &aI) + if err != nil { + return nil, errBadJSONDoc + } + err = json.Unmarshal(b, &bI) + if err != nil { + return nil, errBadJSONDoc + } + dest, err := getDiff(aI, bI) + if err != nil { + return nil, err + } + return json.Marshal(dest) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. 
+func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. +func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 000000000000..de70dbca78ed --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,643 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" +) + +const ( + eRaw = iota + eDoc + eAry +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +type operation map[string]*json.RawMessage + +// Patch is an ordered collection of operations. 
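+// A patch document is a JSON array of operation objects; for example, the
+// following (illustrative) patch replaces one field:
+//	[{"op": "replace", "path": "/name", "value": "Jane"}]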
+type Patch []operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, fmt.Errorf("Unknown type") + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +func (o operation) kind() string { + if obj, ok := o["op"]; ok { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) path() string { + if obj, ok := o["path"]; ok { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) from() string { + if obj, ok := o["from"]; ok { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := 
strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return fmt.Errorf("Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +func (d *partialArray) set(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + sz := len(*d) + if idx+1 > sz { + sz = idx + 1 + } + + ary := make([]*lazyNode, sz) + + cur := *d + + copy(ary, cur) + + if idx >= len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + ary[idx] = val + + *d = ary + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + ary := make([]*lazyNode, len(*d)+1) + + cur := *d + + if idx < 0 { + idx *= -1 + + if idx > len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + idx = len(ary) - idx + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, fmt.Errorf("Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return fmt.Errorf("Unable to remove invalid index: %d", idx) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: %s", path) + } + + return con.add(key, op.value()) +} + +func (p Patch) remove(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: %s", path) + } + + return con.remove(key) +} + +func (p Patch) replace(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) + } + + val, ok := con.get(key) + if val == nil || ok != nil { + return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path) + } + + return con.set(key, op.value()) +} + 
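+// move removes the value at the "from" location and sets it at the "path"
+// location (a remove followed by a set), per RFC 6902 section 4.4.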
+func (p Patch) move(doc *container, op operation) error { + from := op.from() + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return err + } + + err = con.remove(key) + if err != nil { + return err + } + + path := op.path() + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) + } + + return con.set(key, val) +} + +func (p Patch) test(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + + if err != nil { + return err + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return fmt.Errorf("Testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return fmt.Errorf("Testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op operation) error { + from := op.from() + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return err + } + + path := op.path() + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path) + } + + return con.set(key, val) +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + for _, op := range p { + switch op.kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op) + default: + err = fmt.Errorf("Unexpected kind: %s", op.kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. 
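+//
+// For example, the reference token "a~1b~0c" decodes to the object key "a/b~c".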
+ +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/garyburd/redigo/LICENSE b/vendor/github.com/garyburd/redigo/LICENSE new file mode 100644 index 000000000000..67db8588217f --- /dev/null +++ b/vendor/github.com/garyburd/redigo/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/garyburd/redigo/internal/BUILD b/vendor/github.com/garyburd/redigo/internal/BUILD new file mode 100644 index 000000000000..d42e1240463f --- /dev/null +++ b/vendor/github.com/garyburd/redigo/internal/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["commandinfo.go"], + importpath = "github.com/garyburd/redigo/internal", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/garyburd/redigo/internal/commandinfo.go b/vendor/github.com/garyburd/redigo/internal/commandinfo.go new file mode 100644 index 000000000000..11e584257370 --- /dev/null +++ b/vendor/github.com/garyburd/redigo/internal/commandinfo.go @@ -0,0 +1,54 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package internal // import "github.com/garyburd/redigo/internal" + +import ( + "strings" +) + +const ( + WatchState = 1 << iota + MultiState + SubscribeState + MonitorState +) + +type CommandInfo struct { + Set, Clear int +} + +var commandInfos = map[string]CommandInfo{ + "WATCH": {Set: WatchState}, + "UNWATCH": {Clear: WatchState}, + "MULTI": {Set: MultiState}, + "EXEC": {Clear: WatchState | MultiState}, + "DISCARD": {Clear: WatchState | MultiState}, + "PSUBSCRIBE": {Set: SubscribeState}, + "SUBSCRIBE": {Set: SubscribeState}, + "MONITOR": {Set: MonitorState}, +} + +func init() { + for n, ci := range commandInfos { + commandInfos[strings.ToLower(n)] = ci + } +} + +func LookupCommandInfo(commandName string) CommandInfo { + if ci, ok := commandInfos[commandName]; ok { + return ci + } + return commandInfos[strings.ToUpper(commandName)] +} diff --git a/vendor/github.com/garyburd/redigo/redis/BUILD b/vendor/github.com/garyburd/redigo/redis/BUILD new file mode 100644 index 000000000000..bd63e24f78a7 --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/BUILD @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "conn.go", + "doc.go", + "go16.go", + "go17.go", + "go18.go", + "log.go", + "pool.go", + "pubsub.go", + "redis.go", + "reply.go", + "scan.go", + "script.go", + ], + importpath = "github.com/garyburd/redigo/redis", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/garyburd/redigo/internal:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git 
a/vendor/github.com/garyburd/redigo/redis/conn.go b/vendor/github.com/garyburd/redigo/redis/conn.go new file mode 100644 index 000000000000..5aa0f32f2e27 --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/conn.go @@ -0,0 +1,673 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/url" + "regexp" + "strconv" + "sync" + "time" +) + +var ( + _ ConnWithTimeout = (*conn)(nil) +) + +// conn is the low-level implementation of Conn +type conn struct { + // Shared + mu sync.Mutex + pending int + err error + conn net.Conn + + // Read + readTimeout time.Duration + br *bufio.Reader + + // Write + writeTimeout time.Duration + bw *bufio.Writer + + // Scratch space for formatting argument length. + // '*' or '$', length, "\r\n" + lenScratch [32]byte + + // Scratch space for formatting integers and floats. + numScratch [40]byte +} + +// DialTimeout acts like Dial but takes timeouts for establishing the +// connection to the server, writing a command and reading a reply. +// +// Deprecated: Use Dial with options instead. +func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { + return Dial(network, address, + DialConnectTimeout(connectTimeout), + DialReadTimeout(readTimeout), + DialWriteTimeout(writeTimeout)) +} + +// DialOption specifies an option for dialing a Redis server. +type DialOption struct { + f func(*dialOptions) +} + +type dialOptions struct { + readTimeout time.Duration + writeTimeout time.Duration + dialer *net.Dialer + dial func(network, addr string) (net.Conn, error) + db int + password string + useTLS bool + skipVerify bool + tlsConfig *tls.Config +} + +// DialReadTimeout specifies the timeout for reading a single command reply. +func DialReadTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.readTimeout = d + }} +} + +// DialWriteTimeout specifies the timeout for writing a single command. +func DialWriteTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.writeTimeout = d + }} +} + +// DialConnectTimeout specifies the timeout for connecting to the Redis server when +// no DialNetDial option is specified. +func DialConnectTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.dialer.Timeout = d + }} +} + +// DialKeepAlive specifies the keep-alive period for TCP connections to the Redis server +// when no DialNetDial option is specified. +// If zero, keep-alives are not enabled. If no DialKeepAlive option is specified then +// the default of 5 minutes is used to ensure that half-closed TCP sessions are detected. 
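+//
+// For example, a minimal sketch (the address is illustrative) that shortens
+// the keep-alive period to one minute:
+//
+//     c, err := Dial("tcp", "localhost:6379", DialKeepAlive(time.Minute))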
+func DialKeepAlive(d time.Duration) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.dialer.KeepAlive = d
+	}}
+}
+
+// DialNetDial specifies a custom dial function for creating TCP
+// connections, otherwise a net.Dialer customized via the other options is used.
+// DialNetDial overrides DialConnectTimeout and DialKeepAlive.
+func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.dial = dial
+	}}
+}
+
+// DialDatabase specifies the database to select when dialing a connection.
+func DialDatabase(db int) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.db = db
+	}}
+}
+
+// DialPassword specifies the password to use when connecting to
+// the Redis server.
+func DialPassword(password string) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.password = password
+	}}
+}
+
+// DialTLSConfig specifies the config to use when a TLS connection is dialed.
+// Has no effect when not dialing a TLS connection.
+func DialTLSConfig(c *tls.Config) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.tlsConfig = c
+	}}
+}
+
+// DialTLSSkipVerify disables server name verification when connecting over
+// TLS. Has no effect when not dialing a TLS connection.
+func DialTLSSkipVerify(skip bool) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.skipVerify = skip
+	}}
+}
+
+// DialUseTLS specifies whether TLS should be used when connecting to the
+// server. This option is ignored by DialURL.
+func DialUseTLS(useTLS bool) DialOption {
+	return DialOption{func(do *dialOptions) {
+		do.useTLS = useTLS
+	}}
+}
+
+// Dial connects to the Redis server at the given network and
+// address using the specified options.
+func Dial(network, address string, options ...DialOption) (Conn, error) {
+	do := dialOptions{
+		dialer: &net.Dialer{
+			KeepAlive: time.Minute * 5,
+		},
+	}
+	for _, option := range options {
+		option.f(&do)
+	}
+	if do.dial == nil {
+		do.dial = do.dialer.Dial
+	}
+
+	netConn, err := do.dial(network, address)
+	if err != nil {
+		return nil, err
+	}
+
+	if do.useTLS {
+		var tlsConfig *tls.Config
+		if do.tlsConfig == nil {
+			tlsConfig = &tls.Config{InsecureSkipVerify: do.skipVerify}
+		} else {
+			tlsConfig = cloneTLSConfig(do.tlsConfig)
+		}
+		if tlsConfig.ServerName == "" {
+			host, _, err := net.SplitHostPort(address)
+			if err != nil {
+				netConn.Close()
+				return nil, err
+			}
+			tlsConfig.ServerName = host
+		}
+
+		tlsConn := tls.Client(netConn, tlsConfig)
+		if err := tlsConn.Handshake(); err != nil {
+			netConn.Close()
+			return nil, err
+		}
+		netConn = tlsConn
+	}
+
+	c := &conn{
+		conn:         netConn,
+		bw:           bufio.NewWriter(netConn),
+		br:           bufio.NewReader(netConn),
+		readTimeout:  do.readTimeout,
+		writeTimeout: do.writeTimeout,
+	}
+
+	if do.password != "" {
+		if _, err := c.Do("AUTH", do.password); err != nil {
+			netConn.Close()
+			return nil, err
+		}
+	}
+
+	if do.db != 0 {
+		if _, err := c.Do("SELECT", do.db); err != nil {
+			netConn.Close()
+			return nil, err
+		}
+	}
+
+	return c, nil
+}
+
+var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`)
+
+// DialURL connects to a Redis server at the given URL using the Redis
+// URI scheme. URLs should follow the draft IANA specification for the
+// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis).
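+//
+// A sketch of a typical call (credentials, host and database number are
+// illustrative); the password is taken from the URL userinfo and database 1
+// is selected from the path:
+//
+//     c, err := DialURL("redis://:secret@localhost:6379/1")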
+func DialURL(rawurl string, options ...DialOption) (Conn, error) { + u, err := url.Parse(rawurl) + if err != nil { + return nil, err + } + + if u.Scheme != "redis" && u.Scheme != "rediss" { + return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme) + } + + // As per the IANA draft spec, the host defaults to localhost and + // the port defaults to 6379. + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // assume port is missing + host = u.Host + port = "6379" + } + if host == "" { + host = "localhost" + } + address := net.JoinHostPort(host, port) + + if u.User != nil { + password, isSet := u.User.Password() + if isSet { + options = append(options, DialPassword(password)) + } + } + + match := pathDBRegexp.FindStringSubmatch(u.Path) + if len(match) == 2 { + db := 0 + if len(match[1]) > 0 { + db, err = strconv.Atoi(match[1]) + if err != nil { + return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) + } + } + if db != 0 { + options = append(options, DialDatabase(db)) + } + } else if u.Path != "" { + return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) + } + + options = append(options, DialUseTLS(u.Scheme == "rediss")) + + return Dial("tcp", address, options...) +} + +// NewConn returns a new Redigo connection for the given net connection. +func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: readTimeout, + writeTimeout: writeTimeout, + } +} + +func (c *conn) Close() error { + c.mu.Lock() + err := c.err + if c.err == nil { + c.err = errors.New("redigo: closed") + err = c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) fatal(err error) error { + c.mu.Lock() + if c.err == nil { + c.err = err + // Close connection to force errors on subsequent calls and to unblock + // other reader or writer. 
+		c.conn.Close()
+	}
+	c.mu.Unlock()
+	return err
+}
+
+func (c *conn) Err() error {
+	c.mu.Lock()
+	err := c.err
+	c.mu.Unlock()
+	return err
+}
+
+func (c *conn) writeLen(prefix byte, n int) error {
+	c.lenScratch[len(c.lenScratch)-1] = '\n'
+	c.lenScratch[len(c.lenScratch)-2] = '\r'
+	i := len(c.lenScratch) - 3
+	for {
+		c.lenScratch[i] = byte('0' + n%10)
+		i -= 1
+		n = n / 10
+		if n == 0 {
+			break
+		}
+	}
+	c.lenScratch[i] = prefix
+	_, err := c.bw.Write(c.lenScratch[i:])
+	return err
+}
+
+func (c *conn) writeString(s string) error {
+	c.writeLen('$', len(s))
+	c.bw.WriteString(s)
+	_, err := c.bw.WriteString("\r\n")
+	return err
+}
+
+func (c *conn) writeBytes(p []byte) error {
+	c.writeLen('$', len(p))
+	c.bw.Write(p)
+	_, err := c.bw.WriteString("\r\n")
+	return err
+}
+
+func (c *conn) writeInt64(n int64) error {
+	return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10))
+}
+
+func (c *conn) writeFloat64(n float64) error {
+	return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64))
+}
+
+func (c *conn) writeCommand(cmd string, args []interface{}) error {
+	c.writeLen('*', 1+len(args))
+	if err := c.writeString(cmd); err != nil {
+		return err
+	}
+	for _, arg := range args {
+		if err := c.writeArg(arg, true); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *conn) writeArg(arg interface{}, argumentTypeOK bool) (err error) {
+	switch arg := arg.(type) {
+	case string:
+		return c.writeString(arg)
+	case []byte:
+		return c.writeBytes(arg)
+	case int:
+		return c.writeInt64(int64(arg))
+	case int64:
+		return c.writeInt64(arg)
+	case float64:
+		return c.writeFloat64(arg)
+	case bool:
+		if arg {
+			return c.writeString("1")
+		} else {
+			return c.writeString("0")
+		}
+	case nil:
+		return c.writeString("")
+	case Argument:
+		if argumentTypeOK {
+			return c.writeArg(arg.RedisArg(), false)
+		}
+		// See comment in default clause below.
+		var buf bytes.Buffer
+		fmt.Fprint(&buf, arg)
+		return c.writeBytes(buf.Bytes())
+	default:
+		// This default clause is intended to handle builtin numeric types.
+		// The function should return an error for other types, but this is not
+		// done for compatibility with previous versions of the package.
+		var buf bytes.Buffer
+		fmt.Fprint(&buf, arg)
+		return c.writeBytes(buf.Bytes())
+	}
+}
+
+type protocolError string
+
+func (pe protocolError) Error() string {
+	return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe))
+}
+
+func (c *conn) readLine() ([]byte, error) {
+	p, err := c.br.ReadSlice('\n')
+	if err == bufio.ErrBufferFull {
+		return nil, protocolError("long response line")
+	}
+	if err != nil {
+		return nil, err
+	}
+	i := len(p) - 2
+	if i < 0 || p[i] != '\r' {
+		return nil, protocolError("bad response line terminator")
+	}
+	return p[:i], nil
+}
+
+// parseLen parses bulk string and array lengths.
+func parseLen(p []byte) (int, error) {
+	if len(p) == 0 {
+		return -1, protocolError("malformed length")
+	}
+
+	if p[0] == '-' && len(p) == 2 && p[1] == '1' {
+		// handle $-1 and *-1 null replies.
+		return -1, nil
+	}
+
+	var n int
+	for _, b := range p {
+		n *= 10
+		if b < '0' || b > '9' {
+			return -1, protocolError("illegal bytes in length")
+		}
+		n += int(b - '0')
+	}
+
+	return n, nil
+}
+
+// parseInt parses an integer reply.
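+// For example, parseInt([]byte("-42")) yields int64(-42); any non-digit byte
+// after the optional leading '-' results in a protocol error.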
+func parseInt(p []byte) (interface{}, error) { + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + + var negate bool + if p[0] == '-' { + negate = true + p = p[1:] + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + } + + var n int64 + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return 0, protocolError("illegal bytes in length") + } + n += int64(b - '0') + } + + if negate { + n = -n + } + return n, nil +} + +var ( + okReply interface{} = "OK" + pongReply interface{} = "PONG" +) + +func (c *conn) readReply() (interface{}, error) { + line, err := c.readLine() + if err != nil { + return nil, err + } + if len(line) == 0 { + return nil, protocolError("short response line") + } + switch line[0] { + case '+': + switch { + case len(line) == 3 && line[1] == 'O' && line[2] == 'K': + // Avoid allocation for frequent "+OK" response. + return okReply, nil + case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': + // Avoid allocation in PING command benchmarks :) + return pongReply, nil + default: + return string(line[1:]), nil + } + case '-': + return Error(string(line[1:])), nil + case ':': + return parseInt(line[1:]) + case '$': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + p := make([]byte, n) + _, err = io.ReadFull(c.br, p) + if err != nil { + return nil, err + } + if line, err := c.readLine(); err != nil { + return nil, err + } else if len(line) != 0 { + return nil, protocolError("bad bulk string format") + } + return p, nil + case '*': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + r := make([]interface{}, n) + for i := range r { + r[i], err = c.readReply() + if err != nil { + return nil, err + } + } + return r, nil + } + return nil, protocolError("unexpected response line") +} + +func (c *conn) Send(cmd string, args ...interface{}) error { + c.mu.Lock() + c.pending += 1 + c.mu.Unlock() + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.writeCommand(cmd, args); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Flush() error { + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.bw.Flush(); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Receive() (interface{}, error) { + return c.ReceiveWithTimeout(c.readTimeout) +} + +func (c *conn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { + var deadline time.Time + if timeout != 0 { + deadline = time.Now().Add(timeout) + } + c.conn.SetReadDeadline(deadline) + + if reply, err = c.readReply(); err != nil { + return nil, c.fatal(err) + } + // When using pub/sub, the number of receives can be greater than the + // number of sends. To enable normal use of the connection after + // unsubscribing from all channels, we do not decrement pending to a + // negative value. + // + // The pending field is decremented after the reply is read to handle the + // case where Receive is called before Send. + c.mu.Lock() + if c.pending > 0 { + c.pending -= 1 + } + c.mu.Unlock() + if err, ok := reply.(Error); ok { + return nil, err + } + return +} + +func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { + return c.DoWithTimeout(c.readTimeout, cmd, args...) 
+}
+
+func (c *conn) DoWithTimeout(readTimeout time.Duration, cmd string, args ...interface{}) (interface{}, error) {
+	c.mu.Lock()
+	pending := c.pending
+	c.pending = 0
+	c.mu.Unlock()
+
+	if cmd == "" && pending == 0 {
+		return nil, nil
+	}
+
+	if c.writeTimeout != 0 {
+		c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+	}
+
+	if cmd != "" {
+		if err := c.writeCommand(cmd, args); err != nil {
+			return nil, c.fatal(err)
+		}
+	}
+
+	if err := c.bw.Flush(); err != nil {
+		return nil, c.fatal(err)
+	}
+
+	var deadline time.Time
+	if readTimeout != 0 {
+		deadline = time.Now().Add(readTimeout)
+	}
+	c.conn.SetReadDeadline(deadline)
+
+	if cmd == "" {
+		reply := make([]interface{}, pending)
+		for i := range reply {
+			r, e := c.readReply()
+			if e != nil {
+				return nil, c.fatal(e)
+			}
+			reply[i] = r
+		}
+		return reply, nil
+	}
+
+	var err error
+	var reply interface{}
+	for i := 0; i <= pending; i++ {
+		var e error
+		if reply, e = c.readReply(); e != nil {
+			return nil, c.fatal(e)
+		}
+		if e, ok := reply.(Error); ok && err == nil {
+			err = e
+		}
+	}
+	return reply, err
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/doc.go b/vendor/github.com/garyburd/redigo/redis/doc.go
new file mode 100644
index 000000000000..1d19c166849a
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/doc.go
@@ -0,0 +1,177 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package redis is a client for the Redis database.
+//
+// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more
+// documentation about this package.
+//
+// Connections
+//
+// The Conn interface is the primary interface for working with Redis.
+// Applications create connections by calling the Dial, DialTimeout or
+// NewConn functions. In the future, functions will be added for creating
+// sharded and other types of connections.
+//
+// The application must call the connection Close method when the application
+// is done with the connection.
+//
+// Executing Commands
+//
+// The Conn interface has a generic method for executing Redis commands:
+//
+//     Do(commandName string, args ...interface{}) (reply interface{}, err error)
+//
+// The Redis command reference (http://redis.io/commands) lists the available
+// commands. An example of using the Redis APPEND command is:
+//
+//     n, err := conn.Do("APPEND", "key", "value")
+//
+// The Do method converts command arguments to bulk strings for transmission
+// to the server as follows:
+//
+//     Go Type                 Conversion
+//     []byte                  Sent as is
+//     string                  Sent as is
+//     int, int64              strconv.FormatInt(v)
+//     float64                 strconv.FormatFloat(v, 'g', -1, 64)
+//     bool                    true -> "1", false -> "0"
+//     nil                     ""
+//     all other types         fmt.Fprint(w, v)
+//
+// Redis command reply types are represented using the following Go types:
+//
+//     Redis type              Go type
+//     error                   redis.Error
+//     integer                 int64
+//     simple string           string
+//     bulk string             []byte or nil if value not present.
+//     array                   []interface{} or nil if value not present.
+//
+// Use type assertions or the reply helper functions to convert from
+// interface{} to the specific Go type for the command result.
+//
+// Pipelining
+//
+// Connections support pipelining using the Send, Flush and Receive methods.
+//
+//     Send(commandName string, args ...interface{}) error
+//     Flush() error
+//     Receive() (reply interface{}, err error)
+//
+// Send writes the command to the connection's output buffer. Flush flushes the
+// connection's output buffer to the server. Receive reads a single reply from
+// the server. The following example shows a simple pipeline.
+//
+//     c.Send("SET", "foo", "bar")
+//     c.Send("GET", "foo")
+//     c.Flush()
+//     c.Receive() // reply from SET
+//     v, err = c.Receive() // reply from GET
+//
+// The Do method combines the functionality of the Send, Flush and Receive
+// methods. The Do method starts by writing the command and flushing the output
+// buffer. Next, the Do method receives all pending replies including the reply
+// for the command just sent by Do. If any of the received replies is an error,
+// then Do returns the error. If there are no errors, then Do returns the last
+// reply. If the command argument to the Do method is "", then the Do method
+// will flush the output buffer and receive pending replies without sending a
+// command.
+//
+// Use the Send and Do methods to implement pipelined transactions.
+//
+//     c.Send("MULTI")
+//     c.Send("INCR", "foo")
+//     c.Send("INCR", "bar")
+//     r, err := c.Do("EXEC")
+//     fmt.Println(r) // prints [1, 1]
+//
+// Concurrency
+//
+// Connections support one concurrent caller to the Receive method and one
+// concurrent caller to the Send and Flush methods. No other concurrency is
+// supported, including concurrent calls to the Do method.
+//
+// For full concurrent access to Redis, use the thread-safe Pool to get, use
+// and release a connection from within a goroutine. Connections returned from
+// a Pool have the concurrency restrictions described in the previous
+// paragraph.
+//
+// Publish and Subscribe
+//
+// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers.
+//
+//     c.Send("SUBSCRIBE", "example")
+//     c.Flush()
+//     for {
+//         reply, err := c.Receive()
+//         if err != nil {
+//             return err
+//         }
+//         // process pushed message
+//     }
+//
+// The PubSubConn type wraps a Conn with convenience methods for implementing
+// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
+// send and flush a subscription management command. The Receive method
+// converts a pushed message to convenient types for use in a type switch.
+//
+//     psc := redis.PubSubConn{Conn: c}
+//     psc.Subscribe("example")
+//     for {
+//         switch v := psc.Receive().(type) {
+//         case redis.Message:
+//             fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
+//         case redis.Subscription:
+//             fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
+//         case error:
+//             return v
+//         }
+//     }
+//
+// Reply Helpers
+//
+// The Bool, Int, Bytes, String, Strings and Values functions convert a reply
+// to a value of a specific type. To allow convenient wrapping of calls to the
+// connection Do and Receive methods, the functions take a second argument of
+// type error. If the error is non-nil, then the helper function returns the
+// error.
+// If the error is nil, the function converts the reply to the specified
+// type:
+//
+//     exists, err := redis.Bool(c.Do("EXISTS", "foo"))
+//     if err != nil {
+//         // handle error return from c.Do or type conversion error.
+//     }
+//
+// The Scan function converts elements of an array reply to Go types:
+//
+//     var value1 int
+//     var value2 string
+//     reply, err := redis.Values(c.Do("MGET", "key1", "key2"))
+//     if err != nil {
+//         // handle error
+//     }
+//     if _, err := redis.Scan(reply, &value1, &value2); err != nil {
+//         // handle error
+//     }
+//
+// Errors
+//
+// Connection methods return error replies from the server as type redis.Error.
+//
+// Call the connection Err() method to determine if the connection encountered
+// a non-recoverable error, such as a network error or protocol parsing error.
+// If Err() returns a non-nil value, then the connection is not usable and
+// should be closed.
+package redis // import "github.com/garyburd/redigo/redis"
diff --git a/vendor/github.com/garyburd/redigo/redis/go16.go b/vendor/github.com/garyburd/redigo/redis/go16.go
new file mode 100644
index 000000000000..f6b1a7ccdcee
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/go16.go
@@ -0,0 +1,27 @@
+// +build !go1.7
+
+package redis
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	return &tls.Config{
+		Rand:                     cfg.Rand,
+		Time:                     cfg.Time,
+		Certificates:             cfg.Certificates,
+		NameToCertificate:        cfg.NameToCertificate,
+		GetCertificate:           cfg.GetCertificate,
+		RootCAs:                  cfg.RootCAs,
+		NextProtos:               cfg.NextProtos,
+		ServerName:               cfg.ServerName,
+		ClientAuth:               cfg.ClientAuth,
+		ClientCAs:                cfg.ClientCAs,
+		InsecureSkipVerify:       cfg.InsecureSkipVerify,
+		CipherSuites:             cfg.CipherSuites,
+		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+		ClientSessionCache:       cfg.ClientSessionCache,
+		MinVersion:               cfg.MinVersion,
+		MaxVersion:               cfg.MaxVersion,
+		CurvePreferences:         cfg.CurvePreferences,
+	}
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/go17.go b/vendor/github.com/garyburd/redigo/redis/go17.go
new file mode 100644
index 000000000000..5f36379113c7
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/go17.go
@@ -0,0 +1,29 @@
+// +build go1.7,!go1.8
+
+package redis
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	return &tls.Config{
+		Rand:                        cfg.Rand,
+		Time:                        cfg.Time,
+		Certificates:                cfg.Certificates,
+		NameToCertificate:           cfg.NameToCertificate,
+		GetCertificate:              cfg.GetCertificate,
+		RootCAs:                     cfg.RootCAs,
+		NextProtos:                  cfg.NextProtos,
+		ServerName:                  cfg.ServerName,
+		ClientAuth:                  cfg.ClientAuth,
+		ClientCAs:                   cfg.ClientCAs,
+		InsecureSkipVerify:          cfg.InsecureSkipVerify,
+		CipherSuites:                cfg.CipherSuites,
+		PreferServerCipherSuites:    cfg.PreferServerCipherSuites,
+		ClientSessionCache:          cfg.ClientSessionCache,
+		MinVersion:                  cfg.MinVersion,
+		MaxVersion:                  cfg.MaxVersion,
+		CurvePreferences:            cfg.CurvePreferences,
+		DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
+		Renegotiation:               cfg.Renegotiation,
+	}
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/go18.go b/vendor/github.com/garyburd/redigo/redis/go18.go
new file mode 100644
index 000000000000..558363be39ae
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/go18.go
@@ -0,0 +1,9 @@
+// +build go1.8
+
+package redis
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	return cfg.Clone()
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/log.go
b/vendor/github.com/garyburd/redigo/redis/log.go new file mode 100644 index 000000000000..b2996611c6d0 --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/log.go @@ -0,0 +1,134 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "fmt" + "log" + "time" +) + +var ( + _ ConnWithTimeout = (*loggingConn)(nil) +) + +// NewLoggingConn returns a logging wrapper around a connection. +func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { + if prefix != "" { + prefix = prefix + "." + } + return &loggingConn{conn, logger, prefix} +} + +type loggingConn struct { + Conn + logger *log.Logger + prefix string +} + +func (c *loggingConn) Close() error { + err := c.Conn.Close() + var buf bytes.Buffer + fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) + c.logger.Output(2, buf.String()) + return err +} + +func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { + const chop = 32 + switch v := v.(type) { + case []byte: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case string: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case []interface{}: + if len(v) == 0 { + buf.WriteString("[]") + } else { + sep := "[" + fin := "]" + if len(v) > chop { + v = v[:chop] + fin = "...]" + } + for _, vv := range v { + buf.WriteString(sep) + c.printValue(buf, vv) + sep = ", " + } + buf.WriteString(fin) + } + default: + fmt.Fprint(buf, v) + } +} + +func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s%s(", c.prefix, method) + if method != "Receive" { + buf.WriteString(commandName) + for _, arg := range args { + buf.WriteString(", ") + c.printValue(&buf, arg) + } + } + buf.WriteString(") -> (") + if method != "Send" { + c.printValue(&buf, reply) + buf.WriteString(", ") + } + fmt.Fprintf(&buf, "%v)", err) + c.logger.Output(3, buf.String()) +} + +func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { + reply, err := c.Conn.Do(commandName, args...) + c.print("Do", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (interface{}, error) { + reply, err := DoWithTimeout(c.Conn, timeout, commandName, args...) + c.print("DoWithTimeout", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) Send(commandName string, args ...interface{}) error { + err := c.Conn.Send(commandName, args...) 
+ c.print("Send", commandName, args, nil, err) + return err +} + +func (c *loggingConn) Receive() (interface{}, error) { + reply, err := c.Conn.Receive() + c.print("Receive", "", nil, reply, err) + return reply, err +} + +func (c *loggingConn) ReceiveWithTimeout(timeout time.Duration) (interface{}, error) { + reply, err := ReceiveWithTimeout(c.Conn, timeout) + c.print("ReceiveWithTimeout", "", nil, reply, err) + return reply, err +} diff --git a/vendor/github.com/garyburd/redigo/redis/pool.go b/vendor/github.com/garyburd/redigo/redis/pool.go new file mode 100644 index 000000000000..5af97a058c21 --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/pool.go @@ -0,0 +1,469 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "container/list" + "crypto/rand" + "crypto/sha1" + "errors" + "io" + "strconv" + "sync" + "time" + + "github.com/garyburd/redigo/internal" +) + +var ( + _ ConnWithTimeout = (*pooledConnection)(nil) + _ ConnWithTimeout = (*errorConnection)(nil) +) + +var nowFunc = time.Now // for testing + +// ErrPoolExhausted is returned from a pool connection method (Do, Send, +// Receive, Flush, Err) when the maximum number of database connections in the +// pool has been reached. +var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") + +var ( + errPoolClosed = errors.New("redigo: connection pool closed") + errConnClosed = errors.New("redigo: connection closed") +) + +// Pool maintains a pool of connections. The application calls the Get method +// to get a connection from the pool and the connection's Close method to +// return the connection's resources to the pool. +// +// The following example shows how to use a pool in a web application. The +// application creates a pool at application startup and makes it available to +// request handlers using a package level variable. The pool configuration used +// here is an example, not a recommendation. +// +// func newPool(addr string) *redis.Pool { +// return &redis.Pool{ +// MaxIdle: 3, +// IdleTimeout: 240 * time.Second, +// Dial: func () (redis.Conn, error) { return redis.Dial("tcp", addr) }, +// } +// } +// +// var ( +// pool *redis.Pool +// redisServer = flag.String("redisServer", ":6379", "") +// ) +// +// func main() { +// flag.Parse() +// pool = newPool(*redisServer) +// ... +// } +// +// A request handler gets a connection from the pool and closes the connection +// when the handler is done: +// +// func serveHome(w http.ResponseWriter, r *http.Request) { +// conn := pool.Get() +// defer conn.Close() +// ... +// } +// +// Use the Dial function to authenticate connections with the AUTH command or +// select a database with the SELECT command: +// +// pool := &redis.Pool{ +// // Other pool configuration not shown in this example. 
+//     Dial: func () (redis.Conn, error) {
+//         c, err := redis.Dial("tcp", server)
+//         if err != nil {
+//             return nil, err
+//         }
+//         if _, err := c.Do("AUTH", password); err != nil {
+//             c.Close()
+//             return nil, err
+//         }
+//         if _, err := c.Do("SELECT", db); err != nil {
+//             c.Close()
+//             return nil, err
+//         }
+//         return c, nil
+//     },
+//   }
+//
+// Use the TestOnBorrow function to check the health of an idle connection
+// before the connection is returned to the application. This example PINGs
+// connections that have been idle more than a minute:
+//
+//   pool := &redis.Pool{
+//     // Other pool configuration not shown in this example.
+//     TestOnBorrow: func(c redis.Conn, t time.Time) error {
+//       if time.Since(t) < time.Minute {
+//         return nil
+//       }
+//       _, err := c.Do("PING")
+//       return err
+//     },
+//   }
+//
+type Pool struct {
+	// Dial is an application supplied function for creating and configuring a
+	// connection.
+	//
+	// The connection returned from Dial must not be in a special state
+	// (subscribed to pubsub channel, transaction started, ...).
+	Dial func() (Conn, error)
+
+	// TestOnBorrow is an optional application supplied function for checking
+	// the health of an idle connection before the connection is used again by
+	// the application. Argument t is the time that the connection was returned
+	// to the pool. If the function returns an error, then the connection is
+	// closed.
+	TestOnBorrow func(c Conn, t time.Time) error
+
+	// Maximum number of idle connections in the pool.
+	MaxIdle int
+
+	// Maximum number of connections allocated by the pool at a given time.
+	// When zero, there is no limit on the number of connections in the pool.
+	MaxActive int
+
+	// Close connections after remaining idle for this duration. If the value
+	// is zero, then idle connections are not closed. Applications should set
+	// the timeout to a value less than the server's timeout.
+	IdleTimeout time.Duration
+
+	// If Wait is true and the pool is at the MaxActive limit, then Get() waits
+	// for a connection to be returned to the pool before returning.
+	Wait bool
+
+	// mu protects fields defined below.
+	mu     sync.Mutex
+	cond   *sync.Cond
+	closed bool
+	active int
+
+	// Stack of idleConn with most recently used at the front.
+	idle list.List
+}
+
+type idleConn struct {
+	c Conn
+	t time.Time
+}
+
+// NewPool creates a new pool.
+//
+// Deprecated: Initialize the Pool directly as shown in the example.
+func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
+	return &Pool{Dial: newFn, MaxIdle: maxIdle}
+}
+
+// Get gets a connection. The application must close the returned connection.
+// This method always returns a valid connection so that applications can defer
+// error handling to the first use of the connection. If there is an error
+// getting an underlying connection, then the connection Err, Do, Send, Flush
+// and Receive methods return that error.
+func (p *Pool) Get() Conn {
+	c, err := p.get()
+	if err != nil {
+		return errorConnection{err}
+	}
+	return &pooledConnection{p: p, c: c}
+}
+
+// PoolStats contains pool statistics.
+type PoolStats struct {
+	// ActiveCount is the number of connections in the pool. The count
+	// includes idle connections and connections in use.
+	ActiveCount int
+	// IdleCount is the number of idle connections in the pool.
+	IdleCount int
+}
+
+// Stats returns pool's statistics.
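+//
+// For example (a sketch; the pool variable and logging are illustrative):
+//
+//     s := pool.Stats()
+//     log.Printf("redis pool: active=%d idle=%d", s.ActiveCount, s.IdleCount)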
+func (p *Pool) Stats() PoolStats { + p.mu.Lock() + stats := PoolStats{ + ActiveCount: p.active, + IdleCount: p.idle.Len(), + } + p.mu.Unlock() + + return stats +} + +// ActiveCount returns the number of connections in the pool. The count includes idle connections and connections in use. +func (p *Pool) ActiveCount() int { + p.mu.Lock() + active := p.active + p.mu.Unlock() + return active +} + +// IdleCount returns the number of idle connections in the pool. +func (p *Pool) IdleCount() int { + p.mu.Lock() + idle := p.idle.Len() + p.mu.Unlock() + return idle +} + +// Close releases the resources used by the pool. +func (p *Pool) Close() error { + p.mu.Lock() + idle := p.idle + p.idle.Init() + p.closed = true + p.active -= idle.Len() + if p.cond != nil { + p.cond.Broadcast() + } + p.mu.Unlock() + for e := idle.Front(); e != nil; e = e.Next() { + e.Value.(idleConn).c.Close() + } + return nil +} + +// release decrements the active count and signals waiters. The caller must +// hold p.mu during the call. +func (p *Pool) release() { + p.active -= 1 + if p.cond != nil { + p.cond.Signal() + } +} + +// get prunes stale connections and returns a connection from the idle list or +// creates a new connection. +func (p *Pool) get() (Conn, error) { + p.mu.Lock() + + // Prune stale connections. + + if timeout := p.IdleTimeout; timeout > 0 { + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Back() + if e == nil { + break + } + ic := e.Value.(idleConn) + if ic.t.Add(timeout).After(nowFunc()) { + break + } + p.idle.Remove(e) + p.release() + p.mu.Unlock() + ic.c.Close() + p.mu.Lock() + } + } + + for { + // Get idle connection. + + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Front() + if e == nil { + break + } + ic := e.Value.(idleConn) + p.idle.Remove(e) + test := p.TestOnBorrow + p.mu.Unlock() + if test == nil || test(ic.c, ic.t) == nil { + return ic.c, nil + } + ic.c.Close() + p.mu.Lock() + p.release() + } + + // Check for pool closed before dialing a new connection. + + if p.closed { + p.mu.Unlock() + return nil, errors.New("redigo: get on closed pool") + } + + // Dial new connection if under limit. + + if p.MaxActive == 0 || p.active < p.MaxActive { + dial := p.Dial + p.active += 1 + p.mu.Unlock() + c, err := dial() + if err != nil { + p.mu.Lock() + p.release() + p.mu.Unlock() + c = nil + } + return c, err + } + + if !p.Wait { + p.mu.Unlock() + return nil, ErrPoolExhausted + } + + if p.cond == nil { + p.cond = sync.NewCond(&p.mu) + } + p.cond.Wait() + } +} + +func (p *Pool) put(c Conn, forceClose bool) error { + err := c.Err() + p.mu.Lock() + if !p.closed && err == nil && !forceClose { + p.idle.PushFront(idleConn{t: nowFunc(), c: c}) + if p.idle.Len() > p.MaxIdle { + c = p.idle.Remove(p.idle.Back()).(idleConn).c + } else { + c = nil + } + } + + if c == nil { + if p.cond != nil { + p.cond.Signal() + } + p.mu.Unlock() + return nil + } + + p.release() + p.mu.Unlock() + return c.Close() +} + +type pooledConnection struct { + p *Pool + c Conn + state int +} + +var ( + sentinel []byte + sentinelOnce sync.Once +) + +func initSentinel() { + p := make([]byte, 64) + if _, err := rand.Read(p); err == nil { + sentinel = p + } else { + h := sha1.New() + io.WriteString(h, "Oops, rand failed. 
Use time instead.") + io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) + sentinel = h.Sum(nil) + } +} + +func (pc *pooledConnection) Close() error { + c := pc.c + if _, ok := c.(errorConnection); ok { + return nil + } + pc.c = errorConnection{errConnClosed} + + if pc.state&internal.MultiState != 0 { + c.Send("DISCARD") + pc.state &^= (internal.MultiState | internal.WatchState) + } else if pc.state&internal.WatchState != 0 { + c.Send("UNWATCH") + pc.state &^= internal.WatchState + } + if pc.state&internal.SubscribeState != 0 { + c.Send("UNSUBSCRIBE") + c.Send("PUNSUBSCRIBE") + // To detect the end of the message stream, ask the server to echo + // a sentinel value and read until we see that value. + sentinelOnce.Do(initSentinel) + c.Send("ECHO", sentinel) + c.Flush() + for { + p, err := c.Receive() + if err != nil { + break + } + if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { + pc.state &^= internal.SubscribeState + break + } + } + } + c.Do("") + pc.p.put(c, pc.state != 0) + return nil +} + +func (pc *pooledConnection) Err() error { + return pc.c.Err() +} + +func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Do(commandName, args...) +} + +func (pc *pooledConnection) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) { + cwt, ok := pc.c.(ConnWithTimeout) + if !ok { + return nil, errTimeoutNotSupported + } + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return cwt.DoWithTimeout(timeout, commandName, args...) +} + +func (pc *pooledConnection) Send(commandName string, args ...interface{}) error { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Send(commandName, args...) +} + +func (pc *pooledConnection) Flush() error { + return pc.c.Flush() +} + +func (pc *pooledConnection) Receive() (reply interface{}, err error) { + return pc.c.Receive() +} + +func (pc *pooledConnection) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { + cwt, ok := pc.c.(ConnWithTimeout) + if !ok { + return nil, errTimeoutNotSupported + } + return cwt.ReceiveWithTimeout(timeout) +} + +type errorConnection struct{ err error } + +func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } +func (ec errorConnection) DoWithTimeout(time.Duration, string, ...interface{}) (interface{}, error) { + return nil, ec.err +} +func (ec errorConnection) Send(string, ...interface{}) error { return ec.err } +func (ec errorConnection) Err() error { return ec.err } +func (ec errorConnection) Close() error { return nil } +func (ec errorConnection) Flush() error { return ec.err } +func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } +func (ec errorConnection) ReceiveWithTimeout(time.Duration) (interface{}, error) { return nil, ec.err } diff --git a/vendor/github.com/garyburd/redigo/redis/pubsub.go b/vendor/github.com/garyburd/redigo/redis/pubsub.go new file mode 100644 index 000000000000..f0ac82532c77 --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/pubsub.go @@ -0,0 +1,157 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "time" +) + +// Subscription represents a subscribe or unsubscribe notification. +type Subscription struct { + // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" + Kind string + + // The channel that was changed. + Channel string + + // The current number of subscriptions for connection. + Count int +} + +// Message represents a message notification. +type Message struct { + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// PMessage represents a pmessage notification. +type PMessage struct { + // The matched pattern. + Pattern string + + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// Pong represents a pubsub pong notification. +type Pong struct { + Data string +} + +// PubSubConn wraps a Conn with convenience methods for subscribers. +type PubSubConn struct { + Conn Conn +} + +// Close closes the connection. +func (c PubSubConn) Close() error { + return c.Conn.Close() +} + +// Subscribe subscribes the connection to the specified channels. +func (c PubSubConn) Subscribe(channel ...interface{}) error { + c.Conn.Send("SUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PSubscribe subscribes the connection to the given patterns. +func (c PubSubConn) PSubscribe(channel ...interface{}) error { + c.Conn.Send("PSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Unsubscribe unsubscribes the connection from the given channels, or from all +// of them if none is given. +func (c PubSubConn) Unsubscribe(channel ...interface{}) error { + c.Conn.Send("UNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PUnsubscribe unsubscribes the connection from the given patterns, or from all +// of them if none is given. +func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { + c.Conn.Send("PUNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Ping sends a PING to the server with the specified data. +// +// The connection must be subscribed to at least one channel or pattern when +// calling this method. +func (c PubSubConn) Ping(data string) error { + c.Conn.Send("PING", data) + return c.Conn.Flush() +} + +// Receive returns a pushed message as a Subscription, Message, PMessage, Pong +// or error. The return value is intended to be used directly in a type switch +// as illustrated in the PubSubConn example. +func (c PubSubConn) Receive() interface{} { + return c.receiveInternal(c.Conn.Receive()) +} + +// ReceiveWithTimeout is like Receive, but it allows the application to +// override the connection's default timeout. 
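+//
+// A sketch of polling with a one-second timeout (psc is an illustrative
+// PubSubConn):
+//
+//     switch v := psc.ReceiveWithTimeout(time.Second).(type) {
+//     case Message:
+//         // handle v.Channel and v.Data
+//     case Subscription:
+//         // handle subscription count changes
+//     case error:
+//         // handle v, including read timeouts
+//     }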
+func (c PubSubConn) ReceiveWithTimeout(timeout time.Duration) interface{} {
+	return c.receiveInternal(ReceiveWithTimeout(c.Conn, timeout))
+}
+
+func (c PubSubConn) receiveInternal(replyArg interface{}, errArg error) interface{} {
+	reply, err := Values(replyArg, errArg)
+	if err != nil {
+		return err
+	}
+
+	var kind string
+	reply, err = Scan(reply, &kind)
+	if err != nil {
+		return err
+	}
+
+	switch kind {
+	case "message":
+		var m Message
+		if _, err := Scan(reply, &m.Channel, &m.Data); err != nil {
+			return err
+		}
+		return m
+	case "pmessage":
+		var pm PMessage
+		if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil {
+			return err
+		}
+		return pm
+	case "subscribe", "psubscribe", "unsubscribe", "punsubscribe":
+		s := Subscription{Kind: kind}
+		if _, err := Scan(reply, &s.Channel, &s.Count); err != nil {
+			return err
+		}
+		return s
+	case "pong":
+		var p Pong
+		if _, err := Scan(reply, &p.Data); err != nil {
+			return err
+		}
+		return p
+	}
+	return errors.New("redigo: unknown pubsub notification")
+}
diff --git a/vendor/github.com/garyburd/redigo/redis/redis.go b/vendor/github.com/garyburd/redigo/redis/redis.go
new file mode 100644
index 000000000000..141fa4a91211
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/redis/redis.go
@@ -0,0 +1,117 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"errors"
+	"time"
+)
+
+// Error represents an error returned in a command reply.
+type Error string
+
+func (err Error) Error() string { return string(err) }
+
+// Conn represents a connection to a Redis server.
+type Conn interface {
+	// Close closes the connection.
+	Close() error
+
+	// Err returns a non-nil value when the connection is not usable.
+	Err() error
+
+	// Do sends a command to the server and returns the received reply.
+	Do(commandName string, args ...interface{}) (reply interface{}, err error)
+
+	// Send writes the command to the client's output buffer.
+	Send(commandName string, args ...interface{}) error
+
+	// Flush flushes the output buffer to the Redis server.
+	Flush() error
+
+	// Receive receives a single reply from the Redis server.
+	Receive() (reply interface{}, err error)
+}
+
+// Argument is the interface implemented by an object which wants to control how
+// the object is converted to Redis bulk strings.
+type Argument interface {
+	// RedisArg returns a value to be encoded as a bulk string per the
+	// conversions listed in the section 'Executing Commands'.
+	// Implementations should typically return a []byte or string.
+	RedisArg() interface{}
+}
+
+// Scanner is implemented by an object which wants to control how its value is
+// interpreted when read from Redis.
+type Scanner interface {
+	// RedisScan assigns a value from a Redis value. The argument src is one of
+	// the reply types listed in the section `Executing Commands`.
+	//
+	// An error should be returned if the value cannot be stored without
+	// loss of information.
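+	//
+	// A minimal, hypothetical implementation (an illustrative sketch, not
+	// part of this package) that upper-cases bulk strings on scan:
+	//
+	//     type upperString string
+	//
+	//     func (s *upperString) RedisScan(src interface{}) error {
+	//         b, ok := src.([]byte)
+	//         if !ok {
+	//             return fmt.Errorf("upperString: cannot scan %T", src)
+	//         }
+	//         *s = upperString(strings.ToUpper(string(b)))
+	//         return nil
+	//     }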
+ RedisScan(src interface{}) error +} + +// ConnWithTimeout is an optional interface that allows the caller to override +// a connection's default read timeout. This interface is useful for executing +// the BLPOP, BRPOP, BRPOPLPUSH, XREAD and other commands that block at the +// server. +// +// A connection's default read timeout is set with the DialReadTimeout dial +// option. Applications should rely on the default timeout for commands that do +// not block at the server. +// +// All of the Conn implementations in this package satisfy the ConnWithTimeout +// interface. +// +// Use the DoWithTimeout and ReceiveWithTimeout helper functions to simplify +// use of this interface. +type ConnWithTimeout interface { + Conn + + // Do sends a command to the server and returns the received reply. + // The timeout overrides the read timeout set when dialing the + // connection. + DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) + + // Receive receives a single reply from the Redis server. The timeout + // overrides the read timeout set when dialing the connection. + ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) +} + +var errTimeoutNotSupported = errors.New("redis: connection does not support ConnWithTimeout") + +// DoWithTimeout executes a Redis command with the specified read timeout. If +// the connection does not satisfy the ConnWithTimeout interface, then an error +// is returned. +func DoWithTimeout(c Conn, timeout time.Duration, cmd string, args ...interface{}) (interface{}, error) { + cwt, ok := c.(ConnWithTimeout) + if !ok { + return nil, errTimeoutNotSupported + } + return cwt.DoWithTimeout(timeout, cmd, args...) +} + +// ReceiveWithTimeout receives a reply with the specified read timeout. If the +// connection does not satisfy the ConnWithTimeout interface, then an error is +// returned. +func ReceiveWithTimeout(c Conn, timeout time.Duration) (interface{}, error) { + cwt, ok := c.(ConnWithTimeout) + if !ok { + return nil, errTimeoutNotSupported + } + return cwt.ReceiveWithTimeout(timeout) +} diff --git a/vendor/github.com/garyburd/redigo/redis/reply.go b/vendor/github.com/garyburd/redigo/redis/reply.go new file mode 100644 index 000000000000..c2b3b2b6e748 --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/reply.go @@ -0,0 +1,479 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "strconv" +) + +// ErrNil indicates that a reply value is nil. +var ErrNil = errors.New("redigo: nil returned") + +// Int is a helper that converts a command reply to an integer. If err is not +// equal to nil, then Int returns 0, err. 
Otherwise, Int converts the reply to an int as follows:
+//
+//     Reply type      Result
+//     integer         int(reply), nil
+//     bulk string     parsed reply, nil
+//     nil             0, ErrNil
+//     other           0, error
+func Int(reply interface{}, err error) (int, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		x := int(reply)
+		if int64(x) != reply {
+			return 0, strconv.ErrRange
+		}
+		return x, nil
+	case []byte:
+		n, err := strconv.ParseInt(string(reply), 10, 0)
+		return int(n), err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
+}
+
+// Int64 is a helper that converts a command reply to a 64 bit integer. If err
+// is not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts
+// the reply to an int64 as follows:
+//
+//     Reply type      Result
+//     integer         reply, nil
+//     bulk string     parsed reply, nil
+//     nil             0, ErrNil
+//     other           0, error
+func Int64(reply interface{}, err error) (int64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		return reply, nil
+	case []byte:
+		n, err := strconv.ParseInt(string(reply), 10, 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
+}
+
+var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
+
+// Uint64 is a helper that converts a command reply to a 64 bit unsigned
+// integer. If err is not equal to nil, then Uint64 returns 0, err. Otherwise,
+// Uint64 converts the reply to a uint64 as follows:
+//
+//     Reply type      Result
+//     integer         reply, nil
+//     bulk string     parsed reply, nil
+//     nil             0, ErrNil
+//     other           0, error
+func Uint64(reply interface{}, err error) (uint64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		if reply < 0 {
+			return 0, errNegativeInt
+		}
+		return uint64(reply), nil
+	case []byte:
+		n, err := strconv.ParseUint(string(reply), 10, 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
+}
+
+// Float64 is a helper that converts a command reply to a 64 bit float. If err
+// is not equal to nil, then Float64 returns 0, err. Otherwise, Float64
+// converts the reply to a float64 as follows:
+//
+//     Reply type      Result
+//     bulk string     parsed reply, nil
+//     nil             0, ErrNil
+//     other           0, error
+func Float64(reply interface{}, err error) (float64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		n, err := strconv.ParseFloat(string(reply), 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
+}
+
+// String is a helper that converts a command reply to a string. If err is not
+// equal to nil, then String returns "", err.
Otherwise String converts the +// reply to a string as follows: +// +// Reply type Result +// bulk string string(reply), nil +// simple string reply, nil +// nil "", ErrNil +// other "", error +func String(reply interface{}, err error) (string, error) { + if err != nil { + return "", err + } + switch reply := reply.(type) { + case []byte: + return string(reply), nil + case string: + return reply, nil + case nil: + return "", ErrNil + case Error: + return "", reply + } + return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) +} + +// Bytes is a helper that converts a command reply to a slice of bytes. If err +// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts +// the reply to a slice of bytes as follows: +// +// Reply type Result +// bulk string reply, nil +// simple string []byte(reply), nil +// nil nil, ErrNil +// other nil, error +func Bytes(reply interface{}, err error) ([]byte, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []byte: + return reply, nil + case string: + return []byte(reply), nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply) +} + +// Bool is a helper that converts a command reply to a boolean. If err is not +// equal to nil, then Bool returns false, err. Otherwise Bool converts the +// reply to boolean as follows: +// +// Reply type Result +// integer value != 0, nil +// bulk string strconv.ParseBool(reply) +// nil false, ErrNil +// other false, error +func Bool(reply interface{}, err error) (bool, error) { + if err != nil { + return false, err + } + switch reply := reply.(type) { + case int64: + return reply != 0, nil + case []byte: + return strconv.ParseBool(string(reply)) + case nil: + return false, ErrNil + case Error: + return false, reply + } + return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply) +} + +// MultiBulk is a helper that converts an array command reply to a []interface{}. +// +// Deprecated: Use Values instead. +func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } + +// Values is a helper that converts an array command reply to a []interface{}. +// If err is not equal to nil, then Values returns nil, err. Otherwise, Values +// converts the reply as follows: +// +// Reply type Result +// array reply, nil +// nil nil, ErrNil +// other nil, error +func Values(reply interface{}, err error) ([]interface{}, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + return reply, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) +} + +func sliceHelper(reply interface{}, err error, name string, makeSlice func(int), assign func(int, interface{}) error) error { + if err != nil { + return err + } + switch reply := reply.(type) { + case []interface{}: + makeSlice(len(reply)) + for i := range reply { + if reply[i] == nil { + continue + } + if err := assign(i, reply[i]); err != nil { + return err + } + } + return nil + case nil: + return ErrNil + case Error: + return reply + } + return fmt.Errorf("redigo: unexpected type for %s, got type %T", name, reply) +} + +// Float64s is a helper that converts an array command reply to a []float64. If +// err is not equal to nil, then Float64s returns nil, err. 
Nil array items are +// converted to 0 in the output slice. Float64s returns an error if an array +// item is not a bulk string or nil. +func Float64s(reply interface{}, err error) ([]float64, error) { + var result []float64 + err = sliceHelper(reply, err, "Float64s", func(n int) { result = make([]float64, n) }, func(i int, v interface{}) error { + p, ok := v.([]byte) + if !ok { + return fmt.Errorf("redigo: unexpected element type for Float64s, got type %T", v) + } + f, err := strconv.ParseFloat(string(p), 64) + result[i] = f + return err + }) + return result, err +} + +// Strings is a helper that converts an array command reply to a []string. If +// err is not equal to nil, then Strings returns nil, err. Nil array items are +// converted to "" in the output slice. Strings returns an error if an array +// item is not a bulk string, simple string, or nil. +func Strings(reply interface{}, err error) ([]string, error) { + var result []string + err = sliceHelper(reply, err, "Strings", func(n int) { result = make([]string, n) }, func(i int, v interface{}) error { + switch v := v.(type) { + case string: + result[i] = v + return nil + case []byte: + result[i] = string(v) + return nil + default: + return fmt.Errorf("redigo: unexpected element type for Strings, got type %T", v) + } + }) + return result, err +} + +// ByteSlices is a helper that converts an array command reply to a [][]byte. +// If err is not equal to nil, then ByteSlices returns nil, err. Nil array +// items stay nil. ByteSlices returns an error if an array item is not a +// bulk string or nil. +func ByteSlices(reply interface{}, err error) ([][]byte, error) { + var result [][]byte + err = sliceHelper(reply, err, "ByteSlices", func(n int) { result = make([][]byte, n) }, func(i int, v interface{}) error { + p, ok := v.([]byte) + if !ok { + return fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", v) + } + result[i] = p + return nil + }) + return result, err +} + +// Int64s is a helper that converts an array command reply to a []int64. +// If err is not equal to nil, then Int64s returns nil, err. Nil array +// items are converted to 0 in the output slice. Int64s returns an error if an +// array item is not an integer, a bulk string, or nil. +func Int64s(reply interface{}, err error) ([]int64, error) { + var result []int64 + err = sliceHelper(reply, err, "Int64s", func(n int) { result = make([]int64, n) }, func(i int, v interface{}) error { + switch v := v.(type) { + case int64: + result[i] = v + return nil + case []byte: + n, err := strconv.ParseInt(string(v), 10, 64) + result[i] = n + return err + default: + return fmt.Errorf("redigo: unexpected element type for Int64s, got type %T", v) + } + }) + return result, err +} + +// Ints is a helper that converts an array command reply to a []int. +// If err is not equal to nil, then Ints returns nil, err. Nil array +// items are converted to 0 in the output slice. Ints returns an error if an +// array item is not an integer, a bulk string, or nil.
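+//
+// A minimal usage sketch for these reply helpers (illustrative only, not part
+// of the upstream source; it assumes a live Conn named c, and the keys are
+// hypothetical):
+//
+//    n, err := redis.Int(c.Do("INCR", "hits"))
+//    if err != nil {
+//        // handle error
+//    }
+//    ids, err := redis.Ints(c.Do("LRANGE", "recent", 0, -1))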
+func Ints(reply interface{}, err error) ([]int, error) { + var result []int + err = sliceHelper(reply, err, "Ints", func(n int) { result = make([]int, n) }, func(i int, v interface{}) error { + switch v := v.(type) { + case int64: + n := int(v) + if int64(n) != v { + return strconv.ErrRange + } + result[i] = n + return nil + case []byte: + n, err := strconv.Atoi(string(v)) + result[i] = n + return err + default: + return fmt.Errorf("redigo: unexpected element type for Ints, got type %T", v) + } + }) + return result, err +} + +// StringMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format. +// Requires an even number of values in result. +func StringMap(result interface{}, err error) (map[string]string, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: StringMap expects even number of values result") + } + m := make(map[string]string, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, okKey := values[i].([]byte) + value, okValue := values[i+1].([]byte) + if !okKey || !okValue { + return nil, errors.New("redigo: StringMap key or value not a bulk string") + } + m[string(key)] = string(value) + } + return m, nil +} + +// IntMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]int. The HGETALL command returns replies in this format. +// Requires an even number of values in result. +func IntMap(result interface{}, err error) (map[string]int, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: IntMap expects even number of values result") + } + m := make(map[string]int, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: IntMap key not a bulk string value") + } + value, err := Int(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} + +// Int64Map is a helper that converts an array of strings (alternating key, value) +// into a map[string]int64. The HGETALL command returns replies in this format. +// Requires an even number of values in result. +func Int64Map(result interface{}, err error) (map[string]int64, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: Int64Map expects even number of values result") + } + m := make(map[string]int64, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: Int64Map key not a bulk string value") + } + value, err := Int64(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} + +// Positions is a helper that converts an array of positions (lat, long) +// into a [][2]float64. The GEOPOS command returns replies in this format.
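+//
+// A minimal usage sketch (illustrative only, not part of the upstream source;
+// it assumes a live Conn c, and the key and member names are hypothetical):
+//
+//    pos, err := redis.Positions(c.Do("GEOPOS", "fleet", "truck1", "truck2"))
+//    // pos[i] is nil for a missing member, otherwise a *[2]float64.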
+func Positions(result interface{}, err error) ([]*[2]float64, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + positions := make([]*[2]float64, len(values)) + for i := range values { + if values[i] == nil { + continue + } + p, ok := values[i].([]interface{}) + if !ok { + return nil, fmt.Errorf("redigo: unexpected element type for interface slice, got type %T", values[i]) + } + if len(p) != 2 { + return nil, fmt.Errorf("redigo: unexpected number of values for a member position, got %d", len(p)) + } + lat, err := Float64(p[0], nil) + if err != nil { + return nil, err + } + long, err := Float64(p[1], nil) + if err != nil { + return nil, err + } + positions[i] = &[2]float64{lat, long} + } + return positions, nil +} diff --git a/vendor/github.com/garyburd/redigo/redis/scan.go b/vendor/github.com/garyburd/redigo/redis/scan.go new file mode 100644 index 000000000000..ef9551bd476a --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/scan.go @@ -0,0 +1,585 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" +) + +func ensureLen(d reflect.Value, n int) { + if n > d.Cap() { + d.Set(reflect.MakeSlice(d.Type(), n, n)) + } else { + d.SetLen(n) + } +} + +func cannotConvert(d reflect.Value, s interface{}) error { + var sname string + switch s.(type) { + case string: + sname = "Redis simple string" + case Error: + sname = "Redis error" + case int64: + sname = "Redis integer" + case []byte: + sname = "Redis bulk string" + case []interface{}: + sname = "Redis array" + default: + sname = reflect.TypeOf(s).String() + } + return fmt.Errorf("cannot convert from %s to %s", sname, d.Type()) +} + +func convertAssignBulkString(d reflect.Value, s []byte) (err error) { + switch d.Type().Kind() { + case reflect.Float32, reflect.Float64: + var x float64 + x, err = strconv.ParseFloat(string(s), d.Type().Bits()) + d.SetFloat(x) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var x int64 + x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) + d.SetInt(x) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var x uint64 + x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) + d.SetUint(x) + case reflect.Bool: + var x bool + x, err = strconv.ParseBool(string(s)) + d.SetBool(x) + case reflect.String: + d.SetString(string(s)) + case reflect.Slice: + if d.Type().Elem().Kind() != reflect.Uint8 { + err = cannotConvert(d, s) + } else { + d.SetBytes(s) + } + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignInt(d reflect.Value, s int64) (err error) { + switch d.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + d.SetInt(s) + if d.Int() != s { + err = strconv.ErrRange + d.SetInt(0) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if s < 0 { + err = 
strconv.ErrRange + } else { + x := uint64(s) + d.SetUint(x) + if d.Uint() != x { + err = strconv.ErrRange + d.SetUint(0) + } + } + case reflect.Bool: + d.SetBool(s != 0) + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignValue(d reflect.Value, s interface{}) (err error) { + if d.Kind() != reflect.Ptr { + if d.CanAddr() { + d2 := d.Addr() + if d2.CanInterface() { + if scanner, ok := d2.Interface().(Scanner); ok { + return scanner.RedisScan(s) + } + } + } + } else if d.CanInterface() { + // Already a reflect.Ptr + if d.IsNil() { + d.Set(reflect.New(d.Type().Elem())) + } + if scanner, ok := d.Interface().(Scanner); ok { + return scanner.RedisScan(s) + } + } + + switch s := s.(type) { + case []byte: + err = convertAssignBulkString(d, s) + case int64: + err = convertAssignInt(d, s) + default: + err = cannotConvert(d, s) + } + return err +} + +func convertAssignArray(d reflect.Value, s []interface{}) error { + if d.Type().Kind() != reflect.Slice { + return cannotConvert(d, s) + } + ensureLen(d, len(s)) + for i := 0; i < len(s); i++ { + if err := convertAssignValue(d.Index(i), s[i]); err != nil { + return err + } + } + return nil +} + +func convertAssign(d interface{}, s interface{}) (err error) { + if scanner, ok := d.(Scanner); ok { + return scanner.RedisScan(s) + } + + // Handle the most common destination types using type switches and + // fall back to reflection for all other types. + switch s := s.(type) { + case nil: + // ignore + case []byte: + switch d := d.(type) { + case *string: + *d = string(s) + case *int: + *d, err = strconv.Atoi(string(s)) + case *bool: + *d, err = strconv.ParseBool(string(s)) + case *[]byte: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignBulkString(d.Elem(), s) + } + } + case int64: + switch d := d.(type) { + case *int: + x := int(s) + if int64(x) != s { + err = strconv.ErrRange + x = 0 + } + *d = x + case *bool: + *d = s != 0 + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignInt(d.Elem(), s) + } + } + case string: + switch d := d.(type) { + case *string: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + err = cannotConvert(reflect.ValueOf(d), s) + } + case []interface{}: + switch d := d.(type) { + case *[]interface{}: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignArray(d.Elem(), s) + } + } + case Error: + err = s + default: + err = cannotConvert(reflect.ValueOf(d), s) + } + return +} + +// Scan copies from src to the values pointed at by dest. +// +// Scan uses RedisScan if available otherwise: +// +// The values pointed at by dest must be an integer, float, boolean, string, +// []byte, interface{} or slices of these types. Scan uses the standard strconv +// package to convert bulk strings to numeric and boolean types. +// +// If a dest value is nil, then the corresponding src value is skipped. +// +// If a src element is nil, then the corresponding dest value is not modified. +// +// To enable easy use of Scan in a loop, Scan returns the slice of src +// following the copied values. 
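+//
+// A minimal usage sketch (illustrative only, not part of the upstream source;
+// it assumes a live Conn c and a hypothetical hash "user:1" with name and age
+// fields):
+//
+//    var name string
+//    var age int
+//    values, err := redis.Values(c.Do("HMGET", "user:1", "name", "age"))
+//    if err == nil {
+//        _, err = redis.Scan(values, &name, &age)
+//    }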
+func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) { + if len(src) < len(dest) { + return nil, errors.New("redigo.Scan: array short") + } + var err error + for i, d := range dest { + err = convertAssign(d, src[i]) + if err != nil { + err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err) + break + } + } + return src[len(dest):], err +} + +type fieldSpec struct { + name string + index []int + omitEmpty bool +} + +type structSpec struct { + m map[string]*fieldSpec + l []*fieldSpec +} + +func (ss *structSpec) fieldSpec(name []byte) *fieldSpec { + return ss.m[string(name)] +} + +func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + switch { + case f.PkgPath != "" && !f.Anonymous: + // Ignore unexported fields. + case f.Anonymous: + // TODO: Handle pointers. Requires change to decoder and + // protection against infinite recursion. + if f.Type.Kind() == reflect.Struct { + compileStructSpec(f.Type, depth, append(index, i), ss) + } + default: + fs := &fieldSpec{name: f.Name} + tag := f.Tag.Get("redis") + p := strings.Split(tag, ",") + if len(p) > 0 { + if p[0] == "-" { + continue + } + if len(p[0]) > 0 { + fs.name = p[0] + } + for _, s := range p[1:] { + switch s { + case "omitempty": + fs.omitEmpty = true + default: + panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name())) + } + } + } + d, found := depth[fs.name] + if !found { + d = 1 << 30 + } + switch { + case len(index) == d: + // At same depth, remove from result. + delete(ss.m, fs.name) + j := 0 + for i := 0; i < len(ss.l); i++ { + if fs.name != ss.l[i].name { + ss.l[j] = ss.l[i] + j += 1 + } + } + ss.l = ss.l[:j] + case len(index) < d: + fs.index = make([]int, len(index)+1) + copy(fs.index, index) + fs.index[len(index)] = i + depth[fs.name] = len(index) + ss.m[fs.name] = fs + ss.l = append(ss.l, fs) + } + } + } +} + +var ( + structSpecMutex sync.RWMutex + structSpecCache = make(map[reflect.Type]*structSpec) + defaultFieldSpec = &fieldSpec{} +) + +func structSpecForType(t reflect.Type) *structSpec { + + structSpecMutex.RLock() + ss, found := structSpecCache[t] + structSpecMutex.RUnlock() + if found { + return ss + } + + structSpecMutex.Lock() + defer structSpecMutex.Unlock() + ss, found = structSpecCache[t] + if found { + return ss + } + + ss = &structSpec{m: make(map[string]*fieldSpec)} + compileStructSpec(t, make(map[string]int), nil, ss) + structSpecCache[t] = ss + return ss +} + +var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct") + +// ScanStruct scans alternating names and values from src to a struct. The +// HGETALL and CONFIG GET commands return replies in this format. +// +// ScanStruct uses exported field names to match values in the response. Use +// 'redis' field tag to override the name: +// +// Field int `redis:"myName"` +// +// Fields with the tag redis:"-" are ignored. +// +// Each field uses RedisScan if available otherwise: +// Integer, float, boolean, string and []byte fields are supported. Scan uses the +// standard strconv package to convert bulk string values to numeric and +// boolean types. +// +// If a src element is nil, then the corresponding field is not modified. 
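+//
+// A minimal usage sketch (illustrative only, not part of the upstream source;
+// the struct and key are hypothetical, and c is assumed to be a live Conn):
+//
+//    type user struct {
+//        Name string `redis:"name"`
+//        Age  int    `redis:"age"`
+//    }
+//    var u user
+//    v, err := redis.Values(c.Do("HGETALL", "user:1"))
+//    if err == nil {
+//        err = redis.ScanStruct(v, &u)
+//    }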
+func ScanStruct(src []interface{}, dest interface{}) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanStructValue + } + d = d.Elem() + if d.Kind() != reflect.Struct { + return errScanStructValue + } + ss := structSpecForType(d.Type()) + + if len(src)%2 != 0 { + return errors.New("redigo.ScanStruct: number of values not a multiple of 2") + } + + for i := 0; i < len(src); i += 2 { + s := src[i+1] + if s == nil { + continue + } + name, ok := src[i].([]byte) + if !ok { + return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i) + } + fs := ss.fieldSpec(name) + if fs == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err) + } + } + return nil +} + +var ( + errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a slice") +) + +// ScanSlice scans src to the slice pointed to by dest. The elements of the dest +// slice must be integer, float, boolean, string, struct or pointer to struct +// values. +// +// Struct fields must be integer, float, boolean or string values. All struct +// fields are used unless a subset is specified using fieldNames. +func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanSliceValue + } + d = d.Elem() + if d.Kind() != reflect.Slice { + return errScanSliceValue + } + + isPtr := false + t := d.Type().Elem() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + isPtr = true + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + ensureLen(d, len(src)) + for i, s := range src { + if s == nil { + continue + } + if err := convertAssignValue(d.Index(i), s); err != nil { + return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err) + } + } + return nil + } + + ss := structSpecForType(t) + fss := ss.l + if len(fieldNames) > 0 { + fss = make([]*fieldSpec, len(fieldNames)) + for i, name := range fieldNames { + fss[i] = ss.m[name] + if fss[i] == nil { + return fmt.Errorf("redigo.ScanSlice: bad field name %s", name) + } + } + } + + if len(fss) == 0 { + return errors.New("redigo.ScanSlice: no struct fields") + } + + n := len(src) / len(fss) + if n*len(fss) != len(src) { + return errors.New("redigo.ScanSlice: length not a multiple of struct field count") + } + + ensureLen(d, n) + for i := 0; i < n; i++ { + d := d.Index(i) + if isPtr { + if d.IsNil() { + d.Set(reflect.New(t)) + } + d = d.Elem() + } + for j, fs := range fss { + s := src[i*len(fss)+j] + if s == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err) + } + } + } + return nil +} + +// Args is a helper for constructing command arguments from structured values. +type Args []interface{} + +// Add returns the result of appending value to args. +func (args Args) Add(value ...interface{}) Args { + return append(args, value...) +} + +// AddFlat returns the result of appending the flattened value of v to args. +// +// Maps are flattened by appending the alternating keys and map values to args. +// +// Slices are flattened by appending the slice elements to args. +// +// Structs are flattened by appending the alternating names and values of +// exported fields to args.
If v is a nil struct pointer, then nothing is +// appended. The 'redis' field tag overrides struct field names. See ScanStruct +// for more information on the use of the 'redis' field tag. +// +// Other types are appended to args as is. +func (args Args) AddFlat(v interface{}) Args { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Struct: + args = flattenStruct(args, rv) + case reflect.Slice: + for i := 0; i < rv.Len(); i++ { + args = append(args, rv.Index(i).Interface()) + } + case reflect.Map: + for _, k := range rv.MapKeys() { + args = append(args, k.Interface(), rv.MapIndex(k).Interface()) + } + case reflect.Ptr: + if rv.Type().Elem().Kind() == reflect.Struct { + if !rv.IsNil() { + args = flattenStruct(args, rv.Elem()) + } + } else { + args = append(args, v) + } + default: + args = append(args, v) + } + return args +} + +func flattenStruct(args Args, v reflect.Value) Args { + ss := structSpecForType(v.Type()) + for _, fs := range ss.l { + fv := v.FieldByIndex(fs.index) + if fs.omitEmpty { + var empty = false + switch fv.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + empty = fv.Len() == 0 + case reflect.Bool: + empty = !fv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + empty = fv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + empty = fv.Uint() == 0 + case reflect.Float32, reflect.Float64: + empty = fv.Float() == 0 + case reflect.Interface, reflect.Ptr: + empty = fv.IsNil() + } + if empty { + continue + } + } + args = append(args, fs.name, fv.Interface()) + } + return args +} diff --git a/vendor/github.com/garyburd/redigo/redis/script.go b/vendor/github.com/garyburd/redigo/redis/script.go new file mode 100644 index 000000000000..0ef1c821f714 --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/script.go @@ -0,0 +1,91 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +// Script encapsulates the source, hash and key count for a Lua script. See +// http://redis.io/commands/eval for information on scripts in Redis. +type Script struct { + keyCount int + src string + hash string +} + +// NewScript returns a new script object. If keyCount is greater than or equal +// to zero, then the count is automatically inserted in the EVAL command +// argument list. If keyCount is less than zero, then the application supplies +// the count as the first value in the keysAndArgs argument to the Do, Send and +// SendHash methods. 
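+//
+// A minimal usage sketch (illustrative only, not part of the upstream source;
+// the Lua source and key are hypothetical, and c is assumed to be a live Conn):
+//
+//    var getSet = redis.NewScript(1, `return redis.call("GETSET", KEYS[1], ARGV[1])`)
+//    v, err := getSet.Do(c, "mykey", "newvalue")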
+func NewScript(keyCount int, src string) *Script { + h := sha1.New() + io.WriteString(h, src) + return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} +} + +func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { + var args []interface{} + if s.keyCount < 0 { + args = make([]interface{}, 1+len(keysAndArgs)) + args[0] = spec + copy(args[1:], keysAndArgs) + } else { + args = make([]interface{}, 2+len(keysAndArgs)) + args[0] = spec + args[1] = s.keyCount + copy(args[2:], keysAndArgs) + } + return args +} + +// Hash returns the script hash. +func (s *Script) Hash() string { + return s.hash +} + +// Do evaluates the script. Under the covers, Do optimistically evaluates the +// script using the EVALSHA command. If the command fails because the script is +// not loaded, then Do evaluates the script using the EVAL command (thus +// causing the script to load). +func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { + v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) + if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { + v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) + } + return v, err +} + +// SendHash evaluates the script without waiting for the reply. The script is +// evaluated with the EVALSHA command. The application must ensure that the +// script is loaded by a previous call to Send, Do or Load methods. +func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) +} + +// Send evaluates the script without waiting for the reply. +func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVAL", s.args(s.src, keysAndArgs)...) +} + +// Load loads the script without evaluating it. 
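+//
+// A minimal sketch (illustrative only, not part of the upstream source; s is
+// assumed to be a *Script and c a live Conn): load the script once, then
+// pipeline cheaply with SendHash:
+//
+//    if err := s.Load(c); err == nil {
+//        err = s.SendHash(c, "mykey", "value")
+//    }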
+func (s *Script) Load(c Conn) error { + _, err := c.Do("SCRIPT", "LOAD", s.src) + return err +} diff --git a/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/dir-symlink b/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/dir-symlink new file mode 120000 index 000000000000..777ebd014e07 --- /dev/null +++ b/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/dir-symlink @@ -0,0 +1 @@ +../../testdata \ No newline at end of file diff --git a/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/file-symlink b/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/file-symlink new file mode 120000 index 000000000000..4c52274de03d --- /dev/null +++ b/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/file-symlink @@ -0,0 +1 @@ +../test.file \ No newline at end of file diff --git a/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/invalid-symlink b/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/invalid-symlink new file mode 120000 index 000000000000..0edf4f301b10 --- /dev/null +++ b/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/invalid-symlink @@ -0,0 +1 @@ +/non/existing/file \ No newline at end of file diff --git a/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/windows-file-symlink b/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/windows-file-symlink new file mode 120000 index 000000000000..af1d6c8f5739 --- /dev/null +++ b/vendor/github.com/golang/dep/internal/fs/testdata/symlinks/windows-file-symlink @@ -0,0 +1 @@ +C:/Users/ibrahim/go/src/github.com/golang/dep/internal/fs/testdata/test.file \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE new file mode 100644 index 000000000000..6b0b1270ff0c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/BUILD b/vendor/github.com/googleapis/gnostic/OpenAPIv2/BUILD new file mode 100644 index 000000000000..1a90af784694 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "OpenAPIv2.go", + "OpenAPIv2.pb.go", + ], + importpath = "github.com/googleapis/gnostic/OpenAPIv2", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes/any:go_default_library", + "//vendor/github.com/googleapis/gnostic/compiler:go_default_library", + "//vendor/gopkg.in/yaml.v2:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go new file mode 100644 index 000000000000..0e32451a320c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -0,0 +1,8728 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +package openapi_v2 + +import ( + "fmt" + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v2" + "regexp" + "strings" +) + +// Version returns the package name (and OpenAPI version). +func Version() string { + return "openapi_v2" +} + +// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. +func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := in.(bool) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. 
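+//
+// A minimal usage sketch (illustrative only, not part of the generated source;
+// in is assumed to be any value produced by yaml.Unmarshal, and the context
+// name is hypothetical):
+//
+//    val, err := NewAny(in, compiler.NewContext("example", nil))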
+func NewAny(in interface{}, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes, _ := yaml.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. +func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { + errors := make([]error, 0) + x := &ApiKeySecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [apiKey] + if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header query] + if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value 
= resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. +func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { + errors := make([]error, 0) + x := &BasicAuthenticationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [basic] + if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
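+//
+// A minimal usage sketch (illustrative only, not part of the generated source;
+// m is assumed to be the yaml-unmarshaled map describing one parameter):
+//
+//    p, err := NewBodyParameter(m, compiler.NewContext("parameter", nil))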
+func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { + errors := make([]error, 0) + x := &BodyParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "schema"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "required", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [body] + if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema schema = 5; + v5 := compiler.MapValueForKey(m, "schema") + if v5 != nil { + var err error + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if 
possible, returning an error if not. +func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefault creates an object of type Default if possible, returning an error if not. 
+func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { + errors := make([]error, 0) + x := &Default{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefinitions creates an object of type Definitions if possible, returning an error if not. +func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { + errors := make([]error, 0) + x := &Definitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
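+//
+// A minimal usage sketch (illustrative only, not part of the generated source;
+// root is assumed to be the yaml-unmarshaled top level of a Swagger 2.0 file,
+// and the "$root" context name is an assumption):
+//
+//    doc, err := NewDocument(root, compiler.NewContext("$root", nil))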
+// NewDocument creates an object of type Document if possible, returning an error if not.
+func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
+  errors := make([]error, 0)
+  x := &Document{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    requiredKeys := []string{"info", "paths", "swagger"}
+    missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+    if len(missingKeys) > 0 {
+      message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string swagger = 1;
+    v1 := compiler.MapValueForKey(m, "swagger")
+    if v1 != nil {
+      x.Swagger, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [2.0]
+      if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) {
+        message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Info info = 2;
+    v2 := compiler.MapValueForKey(m, "info")
+    if v2 != nil {
+      var err error
+      x.Info, err = NewInfo(v2, compiler.NewContext("info", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // string host = 3;
+    v3 := compiler.MapValueForKey(m, "host")
+    if v3 != nil {
+      x.Host, ok = v3.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string base_path = 4;
+    v4 := compiler.MapValueForKey(m, "basePath")
+    if v4 != nil {
+      x.BasePath, ok = v4.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated string schemes = 5;
+    v5 := compiler.MapValueForKey(m, "schemes")
+    if v5 != nil {
+      v, ok := v5.([]interface{})
+      if ok {
+        x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v)
+      } else {
+        message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [http https ws wss]
+      if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) {
+        message := fmt.Sprintf("has unexpected value for schemes: %+v", v5)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated string consumes = 6;
+    v6 := compiler.MapValueForKey(m, "consumes")
+    if v6 != nil {
+      v, ok := v6.([]interface{})
+      if ok {
+        x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v)
+      } else {
+        message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated string produces = 7;
+    v7 := compiler.MapValueForKey(m, "produces")
+    if v7 != nil {
+      v, ok := v7.([]interface{})
+      if ok {
+        x.Produces = compiler.ConvertInterfaceArrayToStringArray(v)
+      } else {
+        message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Paths paths = 8;
+    v8 := compiler.MapValueForKey(m, "paths")
+    if v8 != nil {
+      var err error
+      x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // Definitions definitions = 9;
+    v9 := compiler.MapValueForKey(m, "definitions")
+    if v9 != nil {
+      var err error
+      x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // ParameterDefinitions parameters = 10;
+    v10 := compiler.MapValueForKey(m, "parameters")
+    if v10 != nil {
+      var err error
+      x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // ResponseDefinitions responses = 11;
+    v11 := compiler.MapValueForKey(m, "responses")
+    if v11 != nil {
+      var err error
+      x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // repeated SecurityRequirement security = 12;
+    v12 := compiler.MapValueForKey(m, "security")
+    if v12 != nil {
+      // repeated SecurityRequirement
+      x.Security = make([]*SecurityRequirement, 0)
+      a, ok := v12.([]interface{})
+      if ok {
+        for _, item := range a {
+          y, err := NewSecurityRequirement(item, compiler.NewContext("security", context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+          x.Security = append(x.Security, y)
+        }
+      }
+    }
+    // SecurityDefinitions security_definitions = 13;
+    v13 := compiler.MapValueForKey(m, "securityDefinitions")
+    if v13 != nil {
+      var err error
+      x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // repeated Tag tags = 14;
+    v14 := compiler.MapValueForKey(m, "tags")
+    if v14 != nil {
+      // repeated Tag
+      x.Tags = make([]*Tag, 0)
+      a, ok := v14.([]interface{})
+      if ok {
+        for _, item := range a {
+          y, err := NewTag(item, compiler.NewContext("tags", context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+          x.Tags = append(x.Tags, y)
+        }
+      }
+    }
+    // ExternalDocs external_docs = 15;
+    v15 := compiler.MapValueForKey(m, "externalDocs")
+    if v15 != nil {
+      var err error
+      x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // repeated NamedAny vendor_extension = 16;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewExamples creates an object of type Examples if possible, returning an error if not.
+func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) {
+  errors := make([]error, 0)
+  x := &Examples{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    // repeated NamedAny additional_properties = 1;
+    // MAP: Any
+    x.AdditionalProperties = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        pair := &NamedAny{}
+        pair.Name = k
+        result := &Any{}
+        handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+        if handled {
+          if err != nil {
+            errors = append(errors, err)
+          } else {
+            bytes, _ := yaml.Marshal(v)
+            result.Yaml = string(bytes)
+            result.Value = resultFromExt
+            pair.Value = result
+          }
+        } else {
+          pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+        }
+        x.AdditionalProperties = append(x.AdditionalProperties, pair)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not.
+func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) {
+  errors := make([]error, 0)
+  x := &ExternalDocs{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    requiredKeys := []string{"url"}
+    missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+    if len(missingKeys) > 0 {
+      message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    allowedKeys := []string{"description", "url"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string description = 1;
+    v1 := compiler.MapValueForKey(m, "description")
+    if v1 != nil {
+      x.Description, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string url = 2;
+    v2 := compiler.MapValueForKey(m, "url")
+    if v2 != nil {
+      x.Url, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated NamedAny vendor_extension = 3;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewFileSchema creates an object of type FileSchema if possible, returning an error if not.
+func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) {
+  errors := make([]error, 0)
+  x := &FileSchema{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    requiredKeys := []string{"type"}
+    missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+    if len(missingKeys) > 0 {
+      message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string format = 1;
+    v1 := compiler.MapValueForKey(m, "format")
+    if v1 != nil {
+      x.Format, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string title = 2;
+    v2 := compiler.MapValueForKey(m, "title")
+    if v2 != nil {
+      x.Title, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string description = 3;
+    v3 := compiler.MapValueForKey(m, "description")
+    if v3 != nil {
+      x.Description, ok = v3.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Any default = 4;
+    v4 := compiler.MapValueForKey(m, "default")
+    if v4 != nil {
+      var err error
+      x.Default, err = NewAny(v4, compiler.NewContext("default", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // repeated string required = 5;
+    v5 := compiler.MapValueForKey(m, "required")
+    if v5 != nil {
+      v, ok := v5.([]interface{})
+      if ok {
+        x.Required = compiler.ConvertInterfaceArrayToStringArray(v)
+      } else {
+        message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string type = 6;
+    v6 := compiler.MapValueForKey(m, "type")
+    if v6 != nil {
+      x.Type, ok = v6.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [file]
+      if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) {
+        message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool read_only = 7;
+    v7 := compiler.MapValueForKey(m, "readOnly")
+    if v7 != nil {
+      x.ReadOnly, ok = v7.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v7, v7)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // ExternalDocs external_docs = 8;
+    v8 := compiler.MapValueForKey(m, "externalDocs")
+    if v8 != nil {
+      var err error
+      x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // Any example = 9;
+    v9 := compiler.MapValueForKey(m, "example")
+    if v9 != nil {
+      var err error
+      x.Example, err = NewAny(v9, compiler.NewContext("example", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // repeated NamedAny vendor_extension = 10;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not.
+func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) {
+  errors := make([]error, 0)
+  x := &FormDataParameterSubSchema{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // bool required = 1;
+    v1 := compiler.MapValueForKey(m, "required")
+    if v1 != nil {
+      x.Required, ok = v1.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string in = 2;
+    v2 := compiler.MapValueForKey(m, "in")
+    if v2 != nil {
+      x.In, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [formData]
+      if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) {
+        message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string description = 3;
+    v3 := compiler.MapValueForKey(m, "description")
+    if v3 != nil {
+      x.Description, ok = v3.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string name = 4;
+    v4 := compiler.MapValueForKey(m, "name")
+    if v4 != nil {
+      x.Name, ok = v4.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool allow_empty_value = 5;
+    v5 := compiler.MapValueForKey(m, "allowEmptyValue")
+    if v5 != nil {
+      x.AllowEmptyValue, ok = v5.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string type = 6;
+    v6 := compiler.MapValueForKey(m, "type")
+    if v6 != nil {
+      x.Type, ok = v6.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [string number boolean integer array file]
+      if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) {
+        message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string format = 7;
+    v7 := compiler.MapValueForKey(m, "format")
+    if v7 != nil {
+      x.Format, ok = v7.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // PrimitivesItems items = 8;
+    v8 := compiler.MapValueForKey(m, "items")
+    if v8 != nil {
+      var err error
+      x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // string collection_format = 9;
+    v9 := compiler.MapValueForKey(m, "collectionFormat")
+    if v9 != nil {
+      x.CollectionFormat, ok = v9.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [csv ssv tsv pipes multi]
+      if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) {
+        message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Any default = 10;
+    v10 := compiler.MapValueForKey(m, "default")
+    if v10 != nil {
+      var err error
+      x.Default, err = NewAny(v10, compiler.NewContext("default", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // float maximum = 11;
+    v11 := compiler.MapValueForKey(m, "maximum")
+    if v11 != nil {
+      switch v11 := v11.(type) {
+      case float64:
+        x.Maximum = v11
+      case float32:
+        x.Maximum = float64(v11)
+      case uint64:
+        x.Maximum = float64(v11)
+      case uint32:
+        x.Maximum = float64(v11)
+      case int64:
+        x.Maximum = float64(v11)
+      case int32:
+        x.Maximum = float64(v11)
+      case int:
+        x.Maximum = float64(v11)
+      default:
+        message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool exclusive_maximum = 12;
+    v12 := compiler.MapValueForKey(m, "exclusiveMaximum")
+    if v12 != nil {
+      x.ExclusiveMaximum, ok = v12.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // float minimum = 13;
+    v13 := compiler.MapValueForKey(m, "minimum")
+    if v13 != nil {
+      switch v13 := v13.(type) {
+      case float64:
+        x.Minimum = v13
+      case float32:
+        x.Minimum = float64(v13)
+      case uint64:
+        x.Minimum = float64(v13)
+      case uint32:
+        x.Minimum = float64(v13)
+      case int64:
+        x.Minimum = float64(v13)
+      case int32:
+        x.Minimum = float64(v13)
+      case int:
+        x.Minimum = float64(v13)
+      default:
+        message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool exclusive_minimum = 14;
+    v14 := compiler.MapValueForKey(m, "exclusiveMinimum")
+    if v14 != nil {
+      x.ExclusiveMinimum, ok = v14.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_length = 15;
+    v15 := compiler.MapValueForKey(m, "maxLength")
+    if v15 != nil {
+      t, ok := v15.(int)
+      if ok {
+        x.MaxLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_length = 16;
+    v16 := compiler.MapValueForKey(m, "minLength")
+    if v16 != nil {
+      t, ok := v16.(int)
+      if ok {
+        x.MinLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string pattern = 17;
+    v17 := compiler.MapValueForKey(m, "pattern")
+    if v17 != nil {
+      x.Pattern, ok = v17.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_items = 18;
+    v18 := compiler.MapValueForKey(m, "maxItems")
+    if v18 != nil {
+      t, ok := v18.(int)
+      if ok {
+        x.MaxItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_items = 19;
+    v19 := compiler.MapValueForKey(m, "minItems")
+    if v19 != nil {
+      t, ok := v19.(int)
+      if ok {
+        x.MinItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool unique_items = 20;
+    v20 := compiler.MapValueForKey(m, "uniqueItems")
+    if v20 != nil {
+      x.UniqueItems, ok = v20.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated Any enum = 21;
+    v21 := compiler.MapValueForKey(m, "enum")
+    if v21 != nil {
+      // repeated Any
+      x.Enum = make([]*Any, 0)
+      a, ok := v21.([]interface{})
+      if ok {
+        for _, item := range a {
+          y, err := NewAny(item, compiler.NewContext("enum", context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+          x.Enum = append(x.Enum, y)
+        }
+      }
+    }
+    // float multiple_of = 22;
+    v22 := compiler.MapValueForKey(m, "multipleOf")
+    if v22 != nil {
+      switch v22 := v22.(type) {
+      case float64:
+        x.MultipleOf = v22
+      case float32:
+        x.MultipleOf = float64(v22)
+      case uint64:
+        x.MultipleOf = float64(v22)
+      case uint32:
+        x.MultipleOf = float64(v22)
+      case int64:
+        x.MultipleOf = float64(v22)
+      case int32:
+        x.MultipleOf = float64(v22)
+      case int:
+        x.MultipleOf = float64(v22)
+      default:
+        message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated NamedAny vendor_extension = 23;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewHeader creates an object of type Header if possible, returning an error if not.
+func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
+  errors := make([]error, 0)
+  x := &Header{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    requiredKeys := []string{"type"}
+    missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+    if len(missingKeys) > 0 {
+      message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string type = 1;
+    v1 := compiler.MapValueForKey(m, "type")
+    if v1 != nil {
+      x.Type, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [string number integer boolean array]
+      if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) {
+        message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string format = 2;
+    v2 := compiler.MapValueForKey(m, "format")
+    if v2 != nil {
+      x.Format, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // PrimitivesItems items = 3;
+    v3 := compiler.MapValueForKey(m, "items")
+    if v3 != nil {
+      var err error
+      x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // string collection_format = 4;
+    v4 := compiler.MapValueForKey(m, "collectionFormat")
+    if v4 != nil {
+      x.CollectionFormat, ok = v4.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [csv ssv tsv pipes]
+      if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
+        message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Any default = 5;
+    v5 := compiler.MapValueForKey(m, "default")
+    if v5 != nil {
+      var err error
+      x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // float maximum = 6;
+    v6 := compiler.MapValueForKey(m, "maximum")
+    if v6 != nil {
+      switch v6 := v6.(type) {
+      case float64:
+        x.Maximum = v6
+      case float32:
+        x.Maximum = float64(v6)
+      case uint64:
+        x.Maximum = float64(v6)
+      case uint32:
+        x.Maximum = float64(v6)
+      case int64:
+        x.Maximum = float64(v6)
+      case int32:
+        x.Maximum = float64(v6)
+      case int:
+        x.Maximum = float64(v6)
+      default:
+        message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool exclusive_maximum = 7;
+    v7 := compiler.MapValueForKey(m, "exclusiveMaximum")
+    if v7 != nil {
+      x.ExclusiveMaximum, ok = v7.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // float minimum = 8;
+    v8 := compiler.MapValueForKey(m, "minimum")
+    if v8 != nil {
+      switch v8 := v8.(type) {
+      case float64:
+        x.Minimum = v8
+      case float32:
+        x.Minimum = float64(v8)
+      case uint64:
+        x.Minimum = float64(v8)
+      case uint32:
+        x.Minimum = float64(v8)
+      case int64:
+        x.Minimum = float64(v8)
+      case int32:
+        x.Minimum = float64(v8)
+      case int:
+        x.Minimum = float64(v8)
+      default:
+        message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool exclusive_minimum = 9;
+    v9 := compiler.MapValueForKey(m, "exclusiveMinimum")
+    if v9 != nil {
+      x.ExclusiveMinimum, ok = v9.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_length = 10;
+    v10 := compiler.MapValueForKey(m, "maxLength")
+    if v10 != nil {
+      t, ok := v10.(int)
+      if ok {
+        x.MaxLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_length = 11;
+    v11 := compiler.MapValueForKey(m, "minLength")
+    if v11 != nil {
+      t, ok := v11.(int)
+      if ok {
+        x.MinLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string pattern = 12;
+    v12 := compiler.MapValueForKey(m, "pattern")
+    if v12 != nil {
+      x.Pattern, ok = v12.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_items = 13;
+    v13 := compiler.MapValueForKey(m, "maxItems")
+    if v13 != nil {
+      t, ok := v13.(int)
+      if ok {
+        x.MaxItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_items = 14;
+    v14 := compiler.MapValueForKey(m, "minItems")
+    if v14 != nil {
+      t, ok := v14.(int)
+      if ok {
+        x.MinItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool unique_items = 15;
+    v15 := compiler.MapValueForKey(m, "uniqueItems")
+    if v15 != nil {
+      x.UniqueItems, ok = v15.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated Any enum = 16;
+    v16 := compiler.MapValueForKey(m, "enum")
+    if v16 != nil {
+      // repeated Any
+      x.Enum = make([]*Any, 0)
+      a, ok := v16.([]interface{})
+      if ok {
+        for _, item := range a {
+          y, err := NewAny(item, compiler.NewContext("enum", context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+          x.Enum = append(x.Enum, y)
+        }
+      }
+    }
+    // float multiple_of = 17;
+    v17 := compiler.MapValueForKey(m, "multipleOf")
+    if v17 != nil {
+      switch v17 := v17.(type) {
+      case float64:
+        x.MultipleOf = v17
+      case float32:
+        x.MultipleOf = float64(v17)
+      case uint64:
+        x.MultipleOf = float64(v17)
+      case uint32:
+        x.MultipleOf = float64(v17)
+      case int64:
+        x.MultipleOf = float64(v17)
+      case int32:
+        x.MultipleOf = float64(v17)
+      case int:
+        x.MultipleOf = float64(v17)
+      default:
+        message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string description = 18;
+    v18 := compiler.MapValueForKey(m, "description")
+    if v18 != nil {
+      x.Description, ok = v18.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated NamedAny vendor_extension = 19;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
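+// YAML numbers may arrive as any of several Go types, so numeric fields such
+// as maximum, minimum, and multipleOf above are coerced through an exhaustive
+// type switch rather than a single assertion. A hypothetical helper with the
+// same shape (for illustration only; the generated code inlines the switch at
+// each use site):
+//
+//   func toFloat64(v interface{}) (float64, bool) {
+//     switch v := v.(type) {
+//     case float64:
+//       return v, true
+//     case float32:
+//       return float64(v), true
+//     case uint64:
+//       return float64(v), true
+//     case uint32:
+//       return float64(v), true
+//     case int64:
+//       return float64(v), true
+//     case int32:
+//       return float64(v), true
+//     case int:
+//       return float64(v), true
+//     default:
+//       return 0, false
+//     }
+//   }
+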
+// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not.
+func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) {
+  errors := make([]error, 0)
+  x := &HeaderParameterSubSchema{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // bool required = 1;
+    v1 := compiler.MapValueForKey(m, "required")
+    if v1 != nil {
+      x.Required, ok = v1.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string in = 2;
+    v2 := compiler.MapValueForKey(m, "in")
+    if v2 != nil {
+      x.In, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [header]
+      if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) {
+        message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string description = 3;
+    v3 := compiler.MapValueForKey(m, "description")
+    if v3 != nil {
+      x.Description, ok = v3.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string name = 4;
+    v4 := compiler.MapValueForKey(m, "name")
+    if v4 != nil {
+      x.Name, ok = v4.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string type = 5;
+    v5 := compiler.MapValueForKey(m, "type")
+    if v5 != nil {
+      x.Type, ok = v5.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [string number boolean integer array]
+      if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
+        message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string format = 6;
+    v6 := compiler.MapValueForKey(m, "format")
+    if v6 != nil {
+      x.Format, ok = v6.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // PrimitivesItems items = 7;
+    v7 := compiler.MapValueForKey(m, "items")
+    if v7 != nil {
+      var err error
+      x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // string collection_format = 8;
+    v8 := compiler.MapValueForKey(m, "collectionFormat")
+    if v8 != nil {
+      x.CollectionFormat, ok = v8.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [csv ssv tsv pipes]
+      if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
+        message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Any default = 9;
+    v9 := compiler.MapValueForKey(m, "default")
+    if v9 != nil {
+      var err error
+      x.Default, err = NewAny(v9, compiler.NewContext("default", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // float maximum = 10;
+    v10 := compiler.MapValueForKey(m, "maximum")
+    if v10 != nil {
+      switch v10 := v10.(type) {
+      case float64:
+        x.Maximum = v10
+      case float32:
+        x.Maximum = float64(v10)
+      case uint64:
+        x.Maximum = float64(v10)
+      case uint32:
+        x.Maximum = float64(v10)
+      case int64:
+        x.Maximum = float64(v10)
+      case int32:
+        x.Maximum = float64(v10)
+      case int:
+        x.Maximum = float64(v10)
+      default:
+        message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool exclusive_maximum = 11;
+    v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
+    if v11 != nil {
+      x.ExclusiveMaximum, ok = v11.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // float minimum = 12;
+    v12 := compiler.MapValueForKey(m, "minimum")
+    if v12 != nil {
+      switch v12 := v12.(type) {
+      case float64:
+        x.Minimum = v12
+      case float32:
+        x.Minimum = float64(v12)
+      case uint64:
+        x.Minimum = float64(v12)
+      case uint32:
+        x.Minimum = float64(v12)
+      case int64:
+        x.Minimum = float64(v12)
+      case int32:
+        x.Minimum = float64(v12)
+      case int:
+        x.Minimum = float64(v12)
+      default:
+        message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool exclusive_minimum = 13;
+    v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
+    if v13 != nil {
+      x.ExclusiveMinimum, ok = v13.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_length = 14;
+    v14 := compiler.MapValueForKey(m, "maxLength")
+    if v14 != nil {
+      t, ok := v14.(int)
+      if ok {
+        x.MaxLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_length = 15;
+    v15 := compiler.MapValueForKey(m, "minLength")
+    if v15 != nil {
+      t, ok := v15.(int)
+      if ok {
+        x.MinLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string pattern = 16;
+    v16 := compiler.MapValueForKey(m, "pattern")
+    if v16 != nil {
+      x.Pattern, ok = v16.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_items = 17;
+    v17 := compiler.MapValueForKey(m, "maxItems")
+    if v17 != nil {
+      t, ok := v17.(int)
+      if ok {
+        x.MaxItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_items = 18;
+    v18 := compiler.MapValueForKey(m, "minItems")
+    if v18 != nil {
+      t, ok := v18.(int)
+      if ok {
+        x.MinItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool unique_items = 19;
+    v19 := compiler.MapValueForKey(m, "uniqueItems")
+    if v19 != nil {
+      x.UniqueItems, ok = v19.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated Any enum = 20;
+    v20 := compiler.MapValueForKey(m, "enum")
+    if v20 != nil {
+      // repeated Any
+      x.Enum = make([]*Any, 0)
+      a, ok := v20.([]interface{})
+      if ok {
+        for _, item := range a {
+          y, err := NewAny(item, compiler.NewContext("enum", context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+          x.Enum = append(x.Enum, y)
+        }
+      }
+    }
+    // float multiple_of = 21;
+    v21 := compiler.MapValueForKey(m, "multipleOf")
+    if v21 != nil {
+      switch v21 := v21.(type) {
+      case float64:
+        x.MultipleOf = v21
+      case float32:
+        x.MultipleOf = float64(v21)
+      case uint64:
+        x.MultipleOf = float64(v21)
+      case uint32:
+        x.MultipleOf = float64(v21)
+      case int64:
+        x.MultipleOf = float64(v21)
+      case int32:
+        x.MultipleOf = float64(v21)
+      case int:
+        x.MultipleOf = float64(v21)
+      default:
+        message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated NamedAny vendor_extension = 22;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewHeaders creates an object of type Headers if possible, returning an error if not.
+func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) {
+  errors := make([]error, 0)
+  x := &Headers{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    // repeated NamedHeader additional_properties = 1;
+    // MAP: Header
+    x.AdditionalProperties = make([]*NamedHeader, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        pair := &NamedHeader{}
+        pair.Name = k
+        var err error
+        pair.Value, err = NewHeader(v, compiler.NewContext(k, context))
+        if err != nil {
+          errors = append(errors, err)
+        }
+        x.AdditionalProperties = append(x.AdditionalProperties, pair)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewInfo creates an object of type Info if possible, returning an error if not.
+func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
+  errors := make([]error, 0)
+  x := &Info{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    requiredKeys := []string{"title", "version"}
+    missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+    if len(missingKeys) > 0 {
+      message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string title = 1;
+    v1 := compiler.MapValueForKey(m, "title")
+    if v1 != nil {
+      x.Title, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string version = 2;
+    v2 := compiler.MapValueForKey(m, "version")
+    if v2 != nil {
+      x.Version, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string description = 3;
+    v3 := compiler.MapValueForKey(m, "description")
+    if v3 != nil {
+      x.Description, ok = v3.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string terms_of_service = 4;
+    v4 := compiler.MapValueForKey(m, "termsOfService")
+    if v4 != nil {
+      x.TermsOfService, ok = v4.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Contact contact = 5;
+    v5 := compiler.MapValueForKey(m, "contact")
+    if v5 != nil {
+      var err error
+      x.Contact, err = NewContact(v5, compiler.NewContext("contact", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // License license = 6;
+    v6 := compiler.MapValueForKey(m, "license")
+    if v6 != nil {
+      var err error
+      x.License, err = NewLicense(v6, compiler.NewContext("license", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // repeated NamedAny vendor_extension = 7;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not.
+func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) {
+  errors := make([]error, 0)
+  x := &ItemsItem{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    x.Schema = make([]*Schema, 0)
+    y, err := NewSchema(m, compiler.NewContext("", context))
+    if err != nil {
+      return nil, err
+    }
+    x.Schema = append(x.Schema, y)
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewJsonReference creates an object of type JsonReference if possible, returning an error if not.
+func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) {
+  errors := make([]error, 0)
+  x := &JsonReference{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    requiredKeys := []string{"$ref"}
+    missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+    if len(missingKeys) > 0 {
+      message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    allowedKeys := []string{"$ref", "description"}
+    var allowedPatterns []*regexp.Regexp
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string _ref = 1;
+    v1 := compiler.MapValueForKey(m, "$ref")
+    if v1 != nil {
+      x.XRef, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string description = 2;
+    v2 := compiler.MapValueForKey(m, "description")
+    if v2 != nil {
+      x.Description, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewLicense creates an object of type License if possible, returning an error if not.
+func NewLicense(in interface{}, context *compiler.Context) (*License, error) {
+  errors := make([]error, 0)
+  x := &License{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    requiredKeys := []string{"name"}
+    missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+    if len(missingKeys) > 0 {
+      message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    allowedKeys := []string{"name", "url"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string name = 1;
+    v1 := compiler.MapValueForKey(m, "name")
+    if v1 != nil {
+      x.Name, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string url = 2;
+    v2 := compiler.MapValueForKey(m, "url")
+    if v2 != nil {
+      x.Url, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated NamedAny vendor_extension = 3;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedAny creates an object of type NamedAny if possible, returning an error if not.
+func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
+  errors := make([]error, 0)
+  x := &NamedAny{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{"name", "value"}
+    var allowedPatterns []*regexp.Regexp
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string name = 1;
+    v1 := compiler.MapValueForKey(m, "name")
+    if v1 != nil {
+      x.Name, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Any value = 2;
+    v2 := compiler.MapValueForKey(m, "value")
+    if v2 != nil {
+      var err error
+      x.Value, err = NewAny(v2, compiler.NewContext("value", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not.
+func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) {
+  errors := make([]error, 0)
+  x := &NamedHeader{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{"name", "value"}
+    var allowedPatterns []*regexp.Regexp
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string name = 1;
+    v1 := compiler.MapValueForKey(m, "name")
+    if v1 != nil {
+      x.Name, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Header value = 2;
+    v2 := compiler.MapValueForKey(m, "value")
+    if v2 != nil {
+      var err error
+      x.Value, err = NewHeader(v2, compiler.NewContext("value", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not.
+func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) {
+  errors := make([]error, 0)
+  x := &NamedParameter{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{"name", "value"}
+    var allowedPatterns []*regexp.Regexp
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string name = 1;
+    v1 := compiler.MapValueForKey(m, "name")
+    if v1 != nil {
+      x.Name, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Parameter value = 2;
+    v2 := compiler.MapValueForKey(m, "value")
+    if v2 != nil {
+      var err error
+      x.Value, err = NewParameter(v2, compiler.NewContext("value", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not.
+func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) {
+  errors := make([]error, 0)
+  x := &NamedPathItem{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{"name", "value"}
+    var allowedPatterns []*regexp.Regexp
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string name = 1;
+    v1 := compiler.MapValueForKey(m, "name")
+    if v1 != nil {
+      x.Name, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // PathItem value = 2;
+    v2 := compiler.MapValueForKey(m, "value")
+    if v2 != nil {
+      var err error
+      x.Value, err = NewPathItem(v2, compiler.NewContext("value", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not.
+func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) {
+  errors := make([]error, 0)
+  x := &NamedResponse{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{"name", "value"}
+    var allowedPatterns []*regexp.Regexp
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string name = 1;
+    v1 := compiler.MapValueForKey(m, "name")
+    if v1 != nil {
+      x.Name, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Response value = 2;
+    v2 := compiler.MapValueForKey(m, "value")
+    if v2 != nil {
+      var err error
+      x.Value, err = NewResponse(v2, compiler.NewContext("value", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not.
+func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) {
+  errors := make([]error, 0)
+  x := &NamedResponseValue{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{"name", "value"}
+    var allowedPatterns []*regexp.Regexp
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string name = 1;
+    v1 := compiler.MapValueForKey(m, "name")
+    if v1 != nil {
+      x.Name, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // ResponseValue value = 2;
+    v2 := compiler.MapValueForKey(m, "value")
+    if v2 != nil {
+      var err error
+      x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { + errors := make([]error, 0) + x := &NamedSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchema(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. +func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &NamedSecurityDefinitionsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecurityDefinitionsItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
+func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. +func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
+func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { + errors := make([]error, 0) + x := &NonBodyParameter{} + matched := false + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // HeaderParameterSubSchema header_parameter_sub_schema = 1; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // QueryParameterSubSchema query_parameter_sub_schema = 3; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // PathParameterSubSchema path_parameter_sub_schema = 4; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
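NewNonBodyParameter demonstrates the oneof-matching strategy used throughout this file: offer the same map to each candidate sub-schema constructor in order, collect the mismatch errors, and discard them all as soon as any candidate matches (hence the repeated "errors might be ok here" comments). Distilled into a standalone fragment; the candidate type and matchOneof name are illustrative, not gnostic API:

package sketch

// candidate is one alternative constructor for a oneof field.
type candidate func(in interface{}) (interface{}, error)

// matchOneof tries candidates in order. A success drops all earlier errors,
// mirroring the generated "if matched { errors = make([]error, 0) }" step.
func matchOneof(in interface{}, candidates ...candidate) (interface{}, []error) {
	var errs []error
	for _, c := range candidates {
		v, err := c(in)
		if err == nil {
			return v, nil // matched: mismatch errors were expected, drop them
		}
		errs = append(errs, err)
	}
	return nil, errs // nothing matched: surface everything
}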
+func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { + errors := make([]error, 0) + x := &Oauth2AccessCodeSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [accessCode] + if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 5; + v5 := compiler.MapValueForKey(m, "tokenUrl") + if v5 != nil { + x.TokenUrl, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 6; + v6 := compiler.MapValueForKey(m, "description") + if v6 != nil { + x.Description, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := 
compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. +func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ApplicationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [application] + if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string 
description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. +func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ImplicitSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [implicit] + if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = 
NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. +func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { + errors := make([]error, 0) + x := &Oauth2PasswordSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [password] + if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. +func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { + errors := make([]error, 0) + x := &Oauth2Scopes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedString{} + pair.Name = k + pair.Value = v.(string) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. 
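Each OAuth2 constructor pins "type" to the single literal oauth2 and "flow" to one literal per scheme (accessCode, application, implicit, password), and the enum check only fires when the key is present and decoded as a string. Note also that NewOauth2Scopes asserts v.(string) with no ok guard, so a non-string scope description would panic rather than produce a reported error. A condensed sketch of the presence-plus-enum check (a plain map is used here for brevity; the generated code walks a yaml.MapSlice through the compiler helpers):

package sketch

import "fmt"

// checkEnum mirrors the generated pattern: a key may be absent, but when
// present it must be a string drawn from the allowed set, as
// compiler.StringArrayContainsValue enforces in the vendored code.
func checkEnum(m map[string]interface{}, key string, allowed ...string) error {
	v, present := m[key]
	if !present {
		return nil
	}
	s, ok := v.(string)
	if !ok {
		return fmt.Errorf("has unexpected value for %s: %+v (%T)", key, v, v)
	}
	for _, a := range allowed {
		if s == a {
			return nil
		}
	}
	return fmt.Errorf("has unexpected value for %s: %+v", key, s)
}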
+func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := v1.([]interface{}) + if ok { + x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 6; + v6 := compiler.MapValueForKey(m, "produces") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 7; + v7 := compiler.MapValueForKey(m, "consumes") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParametersItem parameters = 8; + v8 := compiler.MapValueForKey(m, "parameters") + if v8 != nil { + // repeated ParametersItem + x.Parameters = 
make([]*ParametersItem, 0) + a, ok := v8.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // Responses responses = 9; + v9 := compiler.MapValueForKey(m, "responses") + if v9 != nil { + var err error + x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string schemes = 10; + v10 := compiler.MapValueForKey(m, "schemes") + if v10 != nil { + v, ok := v10.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 11; + v11 := compiler.MapValueForKey(m, "deprecated") + if v11 != nil { + x.Deprecated, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated NamedAny vendor_extension = 13; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. 
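NewOperation is a representative leaf-object constructor: a required-key check ("responses"), an allowed-key check, then one typed extraction per protobuf field. A usage sketch under the same import assumptions as the earlier example; note it leans on the Responses/Response constructors that appear later in this file:

package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	src := `operationId: listWidgets
responses:
  "200":
    description: OK
`
	var m yaml.MapSlice
	if err := yaml.Unmarshal([]byte(src), &m); err != nil {
		panic(err)
	}
	op, err := openapi_v2.NewOperation(m, compiler.NewContext("operation", nil))
	if err != nil {
		fmt.Println(err) // e.g. an omitted "responses" key fails the required-key check
		return
	}
	fmt.Println(op.OperationId)
}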
+func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + matched := false + // BodyParameter body_parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_BodyParameter{BodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // NonBodyParameter non_body_parameter = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. +func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) { + errors := make([]error, 0) + x := &ParameterDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameter additional_properties = 1; + // MAP: Parameter + x.AdditionalProperties = make([]*NamedParameter, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedParameter{} + pair.Name = k + var err error + pair.Value, err = NewParameter(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
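NewParameter is the two-way oneof at the top of the parameter hierarchy: the same input map is offered to NewBodyParameter and NewNonBodyParameter, and whichever accepts it populates x.Oneof. A sketch of inspecting the winning branch, with the same import assumptions as above:

package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	var m yaml.MapSlice
	if err := yaml.Unmarshal([]byte("name: body\nin: body\nschema: {}\n"), &m); err != nil {
		panic(err)
	}
	p, err := openapi_v2.NewParameter(m, compiler.NewContext("parameter", nil))
	if err != nil {
		panic(err)
	}
	// The wrapper types below are the generated oneof wrappers set in NewParameter.
	switch p.Oneof.(type) {
	case *openapi_v2.Parameter_BodyParameter:
		fmt.Println("matched the body branch")
	case *openapi_v2.Parameter_NonBodyParameter:
		fmt.Println("matched the non-body branch")
	}
}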
+func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { + errors := make([]error, 0) + x := &ParametersItem{} + matched := false + // Parameter parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewParameter(m, compiler.NewContext("parameter", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_Parameter{Parameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathItem creates an object of type PathItem if possible, returning an error if not. +func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { + errors := make([]error, 0) + x := &PathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Operation get = 2; + v2 := compiler.MapValueForKey(m, "get") + if v2 != nil { + var err error + x.Get, err = NewOperation(v2, compiler.NewContext("get", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation put = 3; + v3 := compiler.MapValueForKey(m, "put") + if v3 != nil { + var err error + x.Put, err = NewOperation(v3, compiler.NewContext("put", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation post = 4; + v4 := compiler.MapValueForKey(m, "post") + if v4 != nil { + var err error + x.Post, err = NewOperation(v4, compiler.NewContext("post", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation delete = 5; + v5 := compiler.MapValueForKey(m, "delete") + if v5 != nil { + var err error + x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation options = 6; + v6 := compiler.MapValueForKey(m, "options") + if v6 != nil { + var err error + x.Options, err = NewOperation(v6, compiler.NewContext("options", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation head = 7; + v7 := compiler.MapValueForKey(m, "head") + if v7 != nil { + var err error 
+ x.Head, err = NewOperation(v7, compiler.NewContext("head", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation patch = 8; + v8 := compiler.MapValueForKey(m, "patch") + if v8 != nil { + var err error + x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated ParametersItem parameters = 9; + v9 := compiler.MapValueForKey(m, "parameters") + if v9 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := v9.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. +func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { + errors := make([]error, 0) + x := &PathParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"required"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [path] + if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = 
v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = 
float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPaths creates an object of type Paths if possible, returning an error if not. +func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { + errors := make([]error, 0) + x := &Paths{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern0, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedAny vendor_extension = 1; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + // repeated NamedPathItem path = 2; + // MAP: PathItem ^/ + x.Path = make([]*NamedPathItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "/") { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
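NewPaths has no fixed keys at all; every entry is routed by pattern. Keys matching pattern0 (apparently ^x-) become vendor extensions, keys matching pattern1 (apparently ^/) become NamedPathItem entries, and the earlier InvalidKeysInMap pass rejects everything else. A small helper sketching that routing over a yaml.MapSlice; splitPathsKeys is an illustrative name, not gnostic API:

package sketch

import (
	"strings"

	yaml "gopkg.in/yaml.v2"
)

// splitPathsKeys mirrors how NewPaths partitions map entries by key prefix.
func splitPathsKeys(m yaml.MapSlice) (extensions, paths []string) {
	for _, item := range m {
		k, ok := item.Key.(string)
		if !ok {
			continue // non-string keys are skipped, as in the generated loops
		}
		switch {
		case strings.HasPrefix(k, "x-"):
			extensions = append(extensions, k)
		case strings.HasPrefix(k, "/"):
			paths = append(paths, k)
		}
	}
	return
}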
+func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { + errors := make([]error, 0) + x := &PrimitivesItems{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + 
+    if v7 != nil {
+      x.ExclusiveMaximum, ok = v7.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // float minimum = 8;
+    v8 := compiler.MapValueForKey(m, "minimum")
+    if v8 != nil {
+      switch v8 := v8.(type) {
+      case float64:
+        x.Minimum = v8
+      case float32:
+        x.Minimum = float64(v8)
+      case uint64:
+        x.Minimum = float64(v8)
+      case uint32:
+        x.Minimum = float64(v8)
+      case int64:
+        x.Minimum = float64(v8)
+      case int32:
+        x.Minimum = float64(v8)
+      case int:
+        x.Minimum = float64(v8)
+      default:
+        message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool exclusive_minimum = 9;
+    v9 := compiler.MapValueForKey(m, "exclusiveMinimum")
+    if v9 != nil {
+      x.ExclusiveMinimum, ok = v9.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_length = 10;
+    v10 := compiler.MapValueForKey(m, "maxLength")
+    if v10 != nil {
+      t, ok := v10.(int)
+      if ok {
+        x.MaxLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_length = 11;
+    v11 := compiler.MapValueForKey(m, "minLength")
+    if v11 != nil {
+      t, ok := v11.(int)
+      if ok {
+        x.MinLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string pattern = 12;
+    v12 := compiler.MapValueForKey(m, "pattern")
+    if v12 != nil {
+      x.Pattern, ok = v12.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_items = 13;
+    v13 := compiler.MapValueForKey(m, "maxItems")
+    if v13 != nil {
+      t, ok := v13.(int)
+      if ok {
+        x.MaxItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_items = 14;
+    v14 := compiler.MapValueForKey(m, "minItems")
+    if v14 != nil {
+      t, ok := v14.(int)
+      if ok {
+        x.MinItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool unique_items = 15;
+    v15 := compiler.MapValueForKey(m, "uniqueItems")
+    if v15 != nil {
+      x.UniqueItems, ok = v15.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated Any enum = 16;
+    v16 := compiler.MapValueForKey(m, "enum")
+    if v16 != nil {
+      // repeated Any
+      x.Enum = make([]*Any, 0)
+      a, ok := v16.([]interface{})
+      if ok {
+        for _, item := range a {
+          y, err := NewAny(item, compiler.NewContext("enum", context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+          x.Enum = append(x.Enum, y)
+        }
+      }
+    }
+    // float multiple_of = 17;
+    v17 := compiler.MapValueForKey(m, "multipleOf")
+    if v17 != nil {
+      switch v17 := v17.(type) {
+      case float64:
+        x.MultipleOf = v17
+      case float32:
+        x.MultipleOf = float64(v17)
+      case uint64:
+        x.MultipleOf = float64(v17)
+      case uint32:
+        x.MultipleOf = float64(v17)
+      case int64:
+        x.MultipleOf = float64(v17)
+      case int32:
+        x.MultipleOf = float64(v17)
+      case int:
+        x.MultipleOf = float64(v17)
+      default:
+        message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated NamedAny vendor_extension = 18;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewProperties creates an object of type Properties if possible, returning an error if not.
+func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) {
+  errors := make([]error, 0)
+  x := &Properties{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    // repeated NamedSchema additional_properties = 1;
+    // MAP: Schema
+    x.AdditionalProperties = make([]*NamedSchema, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        pair := &NamedSchema{}
+        pair.Name = k
+        var err error
+        pair.Value, err = NewSchema(v, compiler.NewContext(k, context))
+        if err != nil {
+          errors = append(errors, err)
+        }
+        x.AdditionalProperties = append(x.AdditionalProperties, pair)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not.
+func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) {
+  errors := make([]error, 0)
+  x := &QueryParameterSubSchema{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // bool required = 1;
+    v1 := compiler.MapValueForKey(m, "required")
+    if v1 != nil {
+      x.Required, ok = v1.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string in = 2;
+    v2 := compiler.MapValueForKey(m, "in")
+    if v2 != nil {
+      x.In, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [query]
+      if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) {
+        message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string description = 3;
+    v3 := compiler.MapValueForKey(m, "description")
+    if v3 != nil {
+      x.Description, ok = v3.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string name = 4;
+    v4 := compiler.MapValueForKey(m, "name")
+    if v4 != nil {
+      x.Name, ok = v4.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool allow_empty_value = 5;
+    v5 := compiler.MapValueForKey(m, "allowEmptyValue")
+    if v5 != nil {
+      x.AllowEmptyValue, ok = v5.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string type = 6;
+    v6 := compiler.MapValueForKey(m, "type")
+    if v6 != nil {
+      x.Type, ok = v6.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [string number boolean integer array]
+      if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
+        message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string format = 7;
+    v7 := compiler.MapValueForKey(m, "format")
+    if v7 != nil {
+      x.Format, ok = v7.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // PrimitivesItems items = 8;
+    v8 := compiler.MapValueForKey(m, "items")
+    if v8 != nil {
+      var err error
+      x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // string collection_format = 9;
+    v9 := compiler.MapValueForKey(m, "collectionFormat")
+    if v9 != nil {
+      x.CollectionFormat, ok = v9.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+      // check for valid enum values
+      // [csv ssv tsv pipes multi]
+      if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) {
+        message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Any default = 10;
+    v10 := compiler.MapValueForKey(m, "default")
+    if v10 != nil {
+      var err error
+      x.Default, err = NewAny(v10, compiler.NewContext("default", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // float maximum = 11;
+    v11 := compiler.MapValueForKey(m, "maximum")
+    if v11 != nil {
+      switch v11 := v11.(type) {
+      case float64:
+        x.Maximum = v11
+      case float32:
+        x.Maximum = float64(v11)
+      case uint64:
+        x.Maximum = float64(v11)
+      case uint32:
+        x.Maximum = float64(v11)
+      case int64:
+        x.Maximum = float64(v11)
+      case int32:
+        x.Maximum = float64(v11)
+      case int:
+        x.Maximum = float64(v11)
+      default:
+        message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool exclusive_maximum = 12;
+    v12 := compiler.MapValueForKey(m, "exclusiveMaximum")
+    if v12 != nil {
+      x.ExclusiveMaximum, ok = v12.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // float minimum = 13;
+    v13 := compiler.MapValueForKey(m, "minimum")
+    if v13 != nil {
+      switch v13 := v13.(type) {
+      case float64:
+        x.Minimum = v13
+      case float32:
+        x.Minimum = float64(v13)
+      case uint64:
+        x.Minimum = float64(v13)
+      case uint32:
+        x.Minimum = float64(v13)
+      case int64:
+        x.Minimum = float64(v13)
+      case int32:
+        x.Minimum = float64(v13)
+      case int:
+        x.Minimum = float64(v13)
+      default:
+        message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool exclusive_minimum = 14;
+    v14 := compiler.MapValueForKey(m, "exclusiveMinimum")
+    if v14 != nil {
+      x.ExclusiveMinimum, ok = v14.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_length = 15;
+    v15 := compiler.MapValueForKey(m, "maxLength")
+    if v15 != nil {
+      t, ok := v15.(int)
+      if ok {
+        x.MaxLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_length = 16;
+    v16 := compiler.MapValueForKey(m, "minLength")
+    if v16 != nil {
+      t, ok := v16.(int)
+      if ok {
+        x.MinLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string pattern = 17;
+    v17 := compiler.MapValueForKey(m, "pattern")
+    if v17 != nil {
+      x.Pattern, ok = v17.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_items = 18;
+    v18 := compiler.MapValueForKey(m, "maxItems")
+    if v18 != nil {
+      t, ok := v18.(int)
+      if ok {
+        x.MaxItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_items = 19;
+    v19 := compiler.MapValueForKey(m, "minItems")
+    if v19 != nil {
+      t, ok := v19.(int)
+      if ok {
+        x.MinItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool unique_items = 20;
+    v20 := compiler.MapValueForKey(m, "uniqueItems")
+    if v20 != nil {
+      x.UniqueItems, ok = v20.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated Any enum = 21;
+    v21 := compiler.MapValueForKey(m, "enum")
+    if v21 != nil {
+      // repeated Any
+      x.Enum = make([]*Any, 0)
+      a, ok := v21.([]interface{})
+      if ok {
+        for _, item := range a {
+          y, err := NewAny(item, compiler.NewContext("enum", context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+          x.Enum = append(x.Enum, y)
+        }
+      }
+    }
+    // float multiple_of = 22;
+    v22 := compiler.MapValueForKey(m, "multipleOf")
+    if v22 != nil {
+      switch v22 := v22.(type) {
+      case float64:
+        x.MultipleOf = v22
+      case float32:
+        x.MultipleOf = float64(v22)
+      case uint64:
+        x.MultipleOf = float64(v22)
+      case uint32:
+        x.MultipleOf = float64(v22)
+      case int64:
+        x.MultipleOf = float64(v22)
+      case int32:
+        x.MultipleOf = float64(v22)
+      case int:
+        x.MultipleOf = float64(v22)
+      default:
+        message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated NamedAny vendor_extension = 23;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponse creates an object of type Response if possible, returning an error if not.
+func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
+  errors := make([]error, 0)
+  x := &Response{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    requiredKeys := []string{"description"}
+    missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+    if len(missingKeys) > 0 {
+      message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    allowedKeys := []string{"description", "examples", "headers", "schema"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string description = 1;
+    v1 := compiler.MapValueForKey(m, "description")
+    if v1 != nil {
+      x.Description, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // SchemaItem schema = 2;
+    v2 := compiler.MapValueForKey(m, "schema")
+    if v2 != nil {
+      var err error
+      x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // Headers headers = 3;
+    v3 := compiler.MapValueForKey(m, "headers")
+    if v3 != nil {
+      var err error
+      x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // Examples examples = 4;
+    v4 := compiler.MapValueForKey(m, "examples")
+    if v4 != nil {
+      var err error
+      x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // repeated NamedAny vendor_extension = 5;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not.
+func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) {
+  errors := make([]error, 0)
+  x := &ResponseDefinitions{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    // repeated NamedResponse additional_properties = 1;
+    // MAP: Response
+    x.AdditionalProperties = make([]*NamedResponse, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        pair := &NamedResponse{}
+        pair.Name = k
+        var err error
+        pair.Value, err = NewResponse(v, compiler.NewContext(k, context))
+        if err != nil {
+          errors = append(errors, err)
+        }
+        x.AdditionalProperties = append(x.AdditionalProperties, pair)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not.
+func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) {
+  errors := make([]error, 0)
+  x := &ResponseValue{}
+  matched := false
+  // Response response = 1;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matchingError := NewResponse(m, compiler.NewContext("response", context))
+      if matchingError == nil {
+        x.Oneof = &ResponseValue_Response{Response: t}
+        matched = true
+      } else {
+        errors = append(errors, matchingError)
+      }
+    }
+  }
+  // JsonReference json_reference = 2;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
+      if matchingError == nil {
+        x.Oneof = &ResponseValue_JsonReference{JsonReference: t}
+        matched = true
+      } else {
+        errors = append(errors, matchingError)
+      }
+    }
+  }
+  if matched {
+    // since the oneof matched one of its possibilities, discard any matching errors
+    errors = make([]error, 0)
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponses creates an object of type Responses if possible, returning an error if not.
+func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) {
+  errors := make([]error, 0)
+  x := &Responses{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{}
+    allowedPatterns := []*regexp.Regexp{pattern2, pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // repeated NamedResponseValue response_code = 1;
+    // MAP: ResponseValue ^([0-9]{3})$|^(default)$
+    x.ResponseCode = make([]*NamedResponseValue, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if pattern2.MatchString(k) {
+          pair := &NamedResponseValue{}
+          pair.Name = k
+          var err error
+          pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+          x.ResponseCode = append(x.ResponseCode, pair)
+        }
+      }
+    }
+    // repeated NamedAny vendor_extension = 2;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSchema creates an object of type Schema if possible, returning an error if not.
+func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
+  errors := make([]error, 0)
+  x := &Schema{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string _ref = 1;
+    v1 := compiler.MapValueForKey(m, "$ref")
+    if v1 != nil {
+      x.XRef, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string format = 2;
+    v2 := compiler.MapValueForKey(m, "format")
+    if v2 != nil {
+      x.Format, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string title = 3;
+    v3 := compiler.MapValueForKey(m, "title")
+    if v3 != nil {
+      x.Title, ok = v3.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string description = 4;
+    v4 := compiler.MapValueForKey(m, "description")
+    if v4 != nil {
+      x.Description, ok = v4.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Any default = 5;
+    v5 := compiler.MapValueForKey(m, "default")
+    if v5 != nil {
+      var err error
+      x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // float multiple_of = 6;
+    v6 := compiler.MapValueForKey(m, "multipleOf")
+    if v6 != nil {
+      switch v6 := v6.(type) {
+      case float64:
+        x.MultipleOf = v6
+      case float32:
+        x.MultipleOf = float64(v6)
+      case uint64:
+        x.MultipleOf = float64(v6)
+      case uint32:
+        x.MultipleOf = float64(v6)
+      case int64:
+        x.MultipleOf = float64(v6)
+      case int32:
+        x.MultipleOf = float64(v6)
+      case int:
+        x.MultipleOf = float64(v6)
+      default:
+        message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // float maximum = 7;
+    v7 := compiler.MapValueForKey(m, "maximum")
+    if v7 != nil {
+      switch v7 := v7.(type) {
+      case float64:
+        x.Maximum = v7
+      case float32:
+        x.Maximum = float64(v7)
+      case uint64:
+        x.Maximum = float64(v7)
+      case uint32:
+        x.Maximum = float64(v7)
+      case int64:
+        x.Maximum = float64(v7)
+      case int32:
+        x.Maximum = float64(v7)
+      case int:
+        x.Maximum = float64(v7)
+      default:
+        message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool exclusive_maximum = 8;
+    v8 := compiler.MapValueForKey(m, "exclusiveMaximum")
+    if v8 != nil {
+      x.ExclusiveMaximum, ok = v8.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // float minimum = 9;
+    v9 := compiler.MapValueForKey(m, "minimum")
+    if v9 != nil {
+      switch v9 := v9.(type) {
+      case float64:
+        x.Minimum = v9
+      case float32:
+        x.Minimum = float64(v9)
+      case uint64:
+        x.Minimum = float64(v9)
+      case uint32:
+        x.Minimum = float64(v9)
+      case int64:
+        x.Minimum = float64(v9)
+      case int32:
+        x.Minimum = float64(v9)
+      case int:
+        x.Minimum = float64(v9)
+      default:
+        message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool exclusive_minimum = 10;
+    v10 := compiler.MapValueForKey(m, "exclusiveMinimum")
+    if v10 != nil {
+      x.ExclusiveMinimum, ok = v10.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_length = 11;
+    v11 := compiler.MapValueForKey(m, "maxLength")
+    if v11 != nil {
+      t, ok := v11.(int)
+      if ok {
+        x.MaxLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_length = 12;
+    v12 := compiler.MapValueForKey(m, "minLength")
+    if v12 != nil {
+      t, ok := v12.(int)
+      if ok {
+        x.MinLength = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string pattern = 13;
+    v13 := compiler.MapValueForKey(m, "pattern")
+    if v13 != nil {
+      x.Pattern, ok = v13.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_items = 14;
+    v14 := compiler.MapValueForKey(m, "maxItems")
+    if v14 != nil {
+      t, ok := v14.(int)
+      if ok {
+        x.MaxItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_items = 15;
+    v15 := compiler.MapValueForKey(m, "minItems")
+    if v15 != nil {
+      t, ok := v15.(int)
+      if ok {
+        x.MinItems = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool unique_items = 16;
+    v16 := compiler.MapValueForKey(m, "uniqueItems")
+    if v16 != nil {
+      x.UniqueItems, ok = v16.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 max_properties = 17;
+    v17 := compiler.MapValueForKey(m, "maxProperties")
+    if v17 != nil {
+      t, ok := v17.(int)
+      if ok {
+        x.MaxProperties = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // int64 min_properties = 18;
+    v18 := compiler.MapValueForKey(m, "minProperties")
+    if v18 != nil {
+      t, ok := v18.(int)
+      if ok {
+        x.MinProperties = int64(t)
+      } else {
+        message := fmt.Sprintf("has unexpected value for minProperties: %+v (%T)", v18, v18)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated string required = 19;
+    v19 := compiler.MapValueForKey(m, "required")
+    if v19 != nil {
+      v, ok := v19.([]interface{})
+      if ok {
+        x.Required = compiler.ConvertInterfaceArrayToStringArray(v)
+      } else {
+        message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated Any enum = 20;
+    v20 := compiler.MapValueForKey(m, "enum")
+    if v20 != nil {
+      // repeated Any
+      x.Enum = make([]*Any, 0)
+      a, ok := v20.([]interface{})
+      if ok {
+        for _, item := range a {
+          y, err := NewAny(item, compiler.NewContext("enum", context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+          x.Enum = append(x.Enum, y)
+        }
+      }
+    }
+    // AdditionalPropertiesItem additional_properties = 21;
+    v21 := compiler.MapValueForKey(m, "additionalProperties")
+    if v21 != nil {
+      var err error
+      x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // TypeItem type = 22;
+    v22 := compiler.MapValueForKey(m, "type")
+    if v22 != nil {
+      var err error
+      x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // ItemsItem items = 23;
+    v23 := compiler.MapValueForKey(m, "items")
+    if v23 != nil {
+      var err error
+      x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // repeated Schema all_of = 24;
+    v24 := compiler.MapValueForKey(m, "allOf")
+    if v24 != nil {
+      // repeated Schema
+      x.AllOf = make([]*Schema, 0)
+      a, ok := v24.([]interface{})
+      if ok {
+        for _, item := range a {
+          y, err := NewSchema(item, compiler.NewContext("allOf", context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+          x.AllOf = append(x.AllOf, y)
+        }
+      }
+    }
+    // Properties properties = 25;
+    v25 := compiler.MapValueForKey(m, "properties")
+    if v25 != nil {
+      var err error
+      x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // string discriminator = 26;
+    v26 := compiler.MapValueForKey(m, "discriminator")
+    if v26 != nil {
+      x.Discriminator, ok = v26.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool read_only = 27;
+    v27 := compiler.MapValueForKey(m, "readOnly")
+    if v27 != nil {
+      x.ReadOnly, ok = v27.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // Xml xml = 28;
+    v28 := compiler.MapValueForKey(m, "xml")
+    if v28 != nil {
+      var err error
+      x.Xml, err = NewXml(v28, compiler.NewContext("xml", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // ExternalDocs external_docs = 29;
+    v29 := compiler.MapValueForKey(m, "externalDocs")
+    if v29 != nil {
+      var err error
+      x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // Any example = 30;
+    v30 := compiler.MapValueForKey(m, "example")
+    if v30 != nil {
+      var err error
+      x.Example, err = NewAny(v30, compiler.NewContext("example", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // repeated NamedAny vendor_extension = 31;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not.
+func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) {
+  errors := make([]error, 0)
+  x := &SchemaItem{}
+  matched := false
+  // Schema schema = 1;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matchingError := NewSchema(m, compiler.NewContext("schema", context))
+      if matchingError == nil {
+        x.Oneof = &SchemaItem_Schema{Schema: t}
+        matched = true
+      } else {
+        errors = append(errors, matchingError)
+      }
+    }
+  }
+  // FileSchema file_schema = 2;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context))
+      if matchingError == nil {
+        x.Oneof = &SchemaItem_FileSchema{FileSchema: t}
+        matched = true
+      } else {
+        errors = append(errors, matchingError)
+      }
+    }
+  }
+  if matched {
+    // since the oneof matched one of its possibilities, discard any matching errors
+    errors = make([]error, 0)
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not.
+func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) {
+  errors := make([]error, 0)
+  x := &SecurityDefinitions{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    // repeated NamedSecurityDefinitionsItem additional_properties = 1;
+    // MAP: SecurityDefinitionsItem
+    x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        pair := &NamedSecurityDefinitionsItem{}
+        pair.Name = k
+        var err error
+        pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context))
+        if err != nil {
+          errors = append(errors, err)
+        }
+        x.AdditionalProperties = append(x.AdditionalProperties, pair)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not.
+func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) {
+  errors := make([]error, 0)
+  x := &SecurityDefinitionsItem{}
+  matched := false
+  // BasicAuthenticationSecurity basic_authentication_security = 1;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context))
+      if matchingError == nil {
+        x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t}
+        matched = true
+      } else {
+        errors = append(errors, matchingError)
+      }
+    }
+  }
+  // ApiKeySecurity api_key_security = 2;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context))
+      if matchingError == nil {
+        x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t}
+        matched = true
+      } else {
+        errors = append(errors, matchingError)
+      }
+    }
+  }
+  // Oauth2ImplicitSecurity oauth2_implicit_security = 3;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context))
+      if matchingError == nil {
+        x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t}
+        matched = true
+      } else {
+        errors = append(errors, matchingError)
+      }
+    }
+  }
+  // Oauth2PasswordSecurity oauth2_password_security = 4;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context))
+      if matchingError == nil {
+        x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t}
+        matched = true
+      } else {
+        errors = append(errors, matchingError)
+      }
+    }
+  }
+  // Oauth2ApplicationSecurity oauth2_application_security = 5;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context))
+      if matchingError == nil {
+        x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t}
+        matched = true
+      } else {
+        errors = append(errors, matchingError)
+      }
+    }
+  }
+  // Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
+  {
+    m, ok := compiler.UnpackMap(in)
+    if ok {
+      // errors might be ok here, they mean we just don't have the right subtype
+      t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context))
+      if matchingError == nil {
+        x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t}
+        matched = true
+      } else {
+        errors = append(errors, matchingError)
+      }
+    }
+  }
+  if matched {
+    // since the oneof matched one of its possibilities, discard any matching errors
+    errors = make([]error, 0)
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not.
+func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) {
+  errors := make([]error, 0)
+  x := &SecurityRequirement{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    // repeated NamedStringArray additional_properties = 1;
+    // MAP: StringArray
+    x.AdditionalProperties = make([]*NamedStringArray, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        pair := &NamedStringArray{}
+        pair.Name = k
+        var err error
+        pair.Value, err = NewStringArray(v, compiler.NewContext(k, context))
+        if err != nil {
+          errors = append(errors, err)
+        }
+        x.AdditionalProperties = append(x.AdditionalProperties, pair)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewStringArray creates an object of type StringArray if possible, returning an error if not.
+func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) {
+  errors := make([]error, 0)
+  x := &StringArray{}
+  a, ok := in.([]interface{})
+  if !ok {
+    message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    x.Value = make([]string, 0)
+    for _, s := range a {
+      x.Value = append(x.Value, s.(string))
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTag creates an object of type Tag if possible, returning an error if not.
+func NewTag(in interface{}, context *compiler.Context) (*Tag, error) {
+  errors := make([]error, 0)
+  x := &Tag{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    requiredKeys := []string{"name"}
+    missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+    if len(missingKeys) > 0 {
+      message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    allowedKeys := []string{"description", "externalDocs", "name"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string name = 1;
+    v1 := compiler.MapValueForKey(m, "name")
+    if v1 != nil {
+      x.Name, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string description = 2;
+    v2 := compiler.MapValueForKey(m, "description")
+    if v2 != nil {
+      x.Description, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // ExternalDocs external_docs = 3;
+    v3 := compiler.MapValueForKey(m, "externalDocs")
+    if v3 != nil {
+      var err error
+      x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context))
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+    // repeated NamedAny vendor_extension = 4;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTypeItem creates an object of type TypeItem if possible, returning an error if not.
+func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) {
+  errors := make([]error, 0)
+  x := &TypeItem{}
+  switch in := in.(type) {
+  case string:
+    x.Value = make([]string, 0)
+    x.Value = append(x.Value, in)
+  case []interface{}:
+    x.Value = make([]string, 0)
+    for _, v := range in {
+      value, ok := v.(string)
+      if ok {
+        x.Value = append(x.Value, value)
+      } else {
+        message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+  default:
+    message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not.
+func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) {
+  errors := make([]error, 0)
+  x := &VendorExtension{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    // repeated NamedAny additional_properties = 1;
+    // MAP: Any
+    x.AdditionalProperties = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        pair := &NamedAny{}
+        pair.Name = k
+        result := &Any{}
+        handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+        if handled {
+          if err != nil {
+            errors = append(errors, err)
+          } else {
+            bytes, _ := yaml.Marshal(v)
+            result.Yaml = string(bytes)
+            result.Value = resultFromExt
+            pair.Value = result
+          }
+        } else {
+          pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+          if err != nil {
+            errors = append(errors, err)
+          }
+        }
+        x.AdditionalProperties = append(x.AdditionalProperties, pair)
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewXml creates an object of type Xml if possible, returning an error if not.
+func NewXml(in interface{}, context *compiler.Context) (*Xml, error) {
+  errors := make([]error, 0)
+  x := &Xml{}
+  m, ok := compiler.UnpackMap(in)
+  if !ok {
+    message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+    errors = append(errors, compiler.NewError(context, message))
+  } else {
+    allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"}
+    allowedPatterns := []*regexp.Regexp{pattern0}
+    invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+    if len(invalidKeys) > 0 {
+      message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+      errors = append(errors, compiler.NewError(context, message))
+    }
+    // string name = 1;
+    v1 := compiler.MapValueForKey(m, "name")
+    if v1 != nil {
+      x.Name, ok = v1.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string namespace = 2;
+    v2 := compiler.MapValueForKey(m, "namespace")
+    if v2 != nil {
+      x.Namespace, ok = v2.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // string prefix = 3;
+    v3 := compiler.MapValueForKey(m, "prefix")
+    if v3 != nil {
+      x.Prefix, ok = v3.(string)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool attribute = 4;
+    v4 := compiler.MapValueForKey(m, "attribute")
+    if v4 != nil {
+      x.Attribute, ok = v4.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // bool wrapped = 5;
+    v5 := compiler.MapValueForKey(m, "wrapped")
+    if v5 != nil {
+      x.Wrapped, ok = v5.(bool)
+      if !ok {
+        message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5)
+        errors = append(errors, compiler.NewError(context, message))
+      }
+    }
+    // repeated NamedAny vendor_extension = 6;
+    // MAP: Any ^x-
+    x.VendorExtension = make([]*NamedAny, 0)
+    for _, item := range m {
+      k, ok := compiler.StringValue(item.Key)
+      if ok {
+        v := item.Value
+        if strings.HasPrefix(k, "x-") {
+          pair := &NamedAny{}
+          pair.Name = k
+          result := &Any{}
+          handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+          if handled {
+            if err != nil {
+              errors = append(errors, err)
+            } else {
+              bytes, _ := yaml.Marshal(v)
+              result.Yaml = string(bytes)
+              result.Value = resultFromExt
+              pair.Value = result
+            }
+          } else {
+            pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+            if err != nil {
+              errors = append(errors, err)
+            }
+          }
+          x.VendorExtension = append(x.VendorExtension, pair)
+        }
+      }
+    }
+  }
+  return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside AdditionalPropertiesItem objects.
+func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  {
+    p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema)
+    if ok {
+      _, err := p.Schema.ResolveReferences(root)
+      if err != nil {
+        return nil, err
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Any objects.
+func (m *Any) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ApiKeySecurity objects.
+func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects.
+func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside BodyParameter objects.
+func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Schema != nil {
+    _, err := m.Schema.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Contact objects.
+func (m *Contact) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Default objects.
+func (m *Default) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  for _, item := range m.AdditionalProperties {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Definitions objects.
+func (m *Definitions) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  for _, item := range m.AdditionalProperties {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Document objects.
+func (m *Document) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Info != nil {
+    _, err := m.Info.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  if m.Paths != nil {
+    _, err := m.Paths.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  if m.Definitions != nil {
+    _, err := m.Definitions.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  if m.Parameters != nil {
+    _, err := m.Parameters.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  if m.Responses != nil {
+    _, err := m.Responses.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  for _, item := range m.Security {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  if m.SecurityDefinitions != nil {
+    _, err := m.SecurityDefinitions.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  for _, item := range m.Tags {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  if m.ExternalDocs != nil {
+    _, err := m.ExternalDocs.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Examples objects.
+func (m *Examples) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  for _, item := range m.AdditionalProperties {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ExternalDocs objects.
+func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside FileSchema objects.
+func (m *FileSchema) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Default != nil {
+    _, err := m.Default.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  if m.ExternalDocs != nil {
+    _, err := m.ExternalDocs.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  if m.Example != nil {
+    _, err := m.Example.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside FormDataParameterSubSchema objects.
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Items != nil {
+    _, err := m.Items.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  if m.Default != nil {
+    _, err := m.Default.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  for _, item := range m.Enum {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Header objects.
+func (m *Header) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Items != nil {
+    _, err := m.Items.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  if m.Default != nil {
+    _, err := m.Default.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  for _, item := range m.Enum {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside HeaderParameterSubSchema objects.
+func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Items != nil {
+    _, err := m.Items.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  if m.Default != nil {
+    _, err := m.Default.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  for _, item := range m.Enum {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Headers objects.
+func (m *Headers) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  for _, item := range m.AdditionalProperties {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Info objects.
+func (m *Info) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Contact != nil {
+    _, err := m.Contact.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  if m.License != nil {
+    _, err := m.License.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ItemsItem objects.
+func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  for _, item := range m.Schema {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside JsonReference objects.
+func (m *JsonReference) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.XRef != "" {
+    info, err := compiler.ReadInfoForRef(root, m.XRef)
+    if err != nil {
+      return nil, err
+    }
+    if info != nil {
+      replacement, err := NewJsonReference(info, nil)
+      if err == nil {
+        *m = *replacement
+        return m.ResolveReferences(root)
+      }
+    }
+    return info, nil
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside License objects.
+func (m *License) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  for _, item := range m.VendorExtension {
+    if item != nil {
+      _, err := item.ResolveReferences(root)
+      if err != nil {
+        errors = append(errors, err)
+      }
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedAny objects.
+func (m *NamedAny) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Value != nil {
+    _, err := m.Value.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedHeader objects.
+func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Value != nil {
+    _, err := m.Value.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedParameter objects.
+func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Value != nil {
+    _, err := m.Value.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedPathItem objects.
+func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Value != nil {
+    _, err := m.Value.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedResponse objects.
+func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Value != nil {
+    _, err := m.Value.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedResponseValue objects.
+func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) {
+  errors := make([]error, 0)
+  if m.Value != nil {
+    _, err := m.Value.ResolveReferences(root)
+    if err != nil {
+      errors = append(errors, err)
+    }
+  }
+  return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedSchema objects.
+func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects.
+func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedString objects.
+func (m *NamedString) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedStringArray objects.
+func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NonBodyParameter objects.
+func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema)
+		if ok {
+			_, err := p.HeaderParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema)
+		if ok {
+			_, err := p.FormDataParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema)
+		if ok {
+			_, err := p.QueryParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema)
+		if ok {
+			_, err := p.PathParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects.
+func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects.
+func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects.
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects.
+func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2Scopes objects.
+func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Operation objects.
+func (m *Operation) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Parameters {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.Responses != nil {
+		_, err := m.Responses.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Security {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Parameter objects.
+func (m *Parameter) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*Parameter_BodyParameter)
+		if ok {
+			_, err := p.BodyParameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*Parameter_NonBodyParameter)
+		if ok {
+			_, err := p.NonBodyParameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ParameterDefinitions objects.
+func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ParametersItem objects.
+func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*ParametersItem_Parameter)
+		if ok {
+			_, err := p.Parameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*ParametersItem_JsonReference)
+		if ok {
+			info, err := p.JsonReference.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			} else if info != nil {
+				n, err := NewParametersItem(info, nil)
+				if err != nil {
+					return nil, err
+				} else if n != nil {
+					*m = *n
+					return nil, nil
+				}
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PathItem objects.
+func (m *PathItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewPathItem(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	if m.Get != nil {
+		_, err := m.Get.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Put != nil {
+		_, err := m.Put.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Post != nil {
+		_, err := m.Post.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Delete != nil {
+		_, err := m.Delete.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Options != nil {
+		_, err := m.Options.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Head != nil {
+		_, err := m.Head.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Patch != nil {
+		_, err := m.Patch.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Parameters {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PathParameterSubSchema objects.
+func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Paths objects.
+func (m *Paths) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.Path {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PrimitivesItems objects.
+func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Properties objects.
+func (m *Properties) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside QueryParameterSubSchema objects.
+func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Response objects.
+func (m *Response) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Schema != nil {
+		_, err := m.Schema.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Headers != nil {
+		_, err := m.Headers.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Examples != nil {
+		_, err := m.Examples.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ResponseDefinitions objects.
+func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ResponseValue objects.
+func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*ResponseValue_Response)
+		if ok {
+			_, err := p.Response.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*ResponseValue_JsonReference)
+		if ok {
+			info, err := p.JsonReference.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			} else if info != nil {
+				n, err := NewResponseValue(info, nil)
+				if err != nil {
+					return nil, err
+				} else if n != nil {
+					*m = *n
+					return nil, nil
+				}
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Responses objects.
+func (m *Responses) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.ResponseCode {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Schema objects.
+func (m *Schema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewSchema(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.AdditionalProperties != nil {
+		_, err := m.AdditionalProperties.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Type != nil {
+		_, err := m.Type.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.AllOf {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.Properties != nil {
+		_, err := m.Properties.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Xml != nil {
+		_, err := m.Xml.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Example != nil {
+		_, err := m.Example.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SchemaItem objects.
+func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*SchemaItem_Schema)
+		if ok {
+			_, err := p.Schema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SchemaItem_FileSchema)
+		if ok {
+			_, err := p.FileSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityDefinitions objects.
+func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityDefinitionsItem objects.
+func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity)
+		if ok {
+			_, err := p.BasicAuthenticationSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity)
+		if ok {
+			_, err := p.ApiKeySecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)
+		if ok {
+			_, err := p.Oauth2ImplicitSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity)
+		if ok {
+			_, err := p.Oauth2PasswordSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity)
+		if ok {
+			_, err := p.Oauth2ApplicationSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)
+		if ok {
+			_, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityRequirement objects.
+func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside StringArray objects.
+func (m *StringArray) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Tag objects.
+func (m *Tag) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside TypeItem objects.
+func (m *TypeItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside VendorExtension objects.
+func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Xml objects.
+func (m *Xml) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export.
+func (m *AdditionalPropertiesItem) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// AdditionalPropertiesItem
+	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
+		return v1.Boolean
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of Any suitable for JSON or YAML export.
+func (m *Any) ToRawInfo() interface{} {
+	var err error
+	var info1 []yaml.MapSlice
+	err = yaml.Unmarshal([]byte(m.Yaml), &info1)
+	if err == nil {
+		return info1
+	}
+	var info2 yaml.MapSlice
+	err = yaml.Unmarshal([]byte(m.Yaml), &info2)
+	if err == nil {
+		return info2
+	}
+	var info3 interface{}
+	err = yaml.Unmarshal([]byte(m.Yaml), &info3)
+	if err == nil {
+		return info3
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export.
+func (m *ApiKeySecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{"type", m.Type})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{"in", m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export.
+func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{"type", m.Type})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export.
+func (m *BodyParameter) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{"in", m.In})
+	}
+	if m.Required != false {
+		info = append(info, yaml.MapItem{"required", m.Required})
+	}
+	if m.Schema != nil {
+		info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()})
+	}
+	// &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Contact suitable for JSON or YAML export.
+func (m *Contact) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	if m.Url != "" {
+		info = append(info, yaml.MapItem{"url", m.Url})
+	}
+	if m.Email != "" {
+		info = append(info, yaml.MapItem{"email", m.Email})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Default suitable for JSON or YAML export.
+func (m *Default) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Definitions suitable for JSON or YAML export.
+func (m *Definitions) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Document suitable for JSON or YAML export.
+func (m *Document) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Swagger != "" {
+		info = append(info, yaml.MapItem{"swagger", m.Swagger})
+	}
+	if m.Info != nil {
+		info = append(info, yaml.MapItem{"info", m.Info.ToRawInfo()})
+	}
+	// &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Host != "" {
+		info = append(info, yaml.MapItem{"host", m.Host})
+	}
+	if m.BasePath != "" {
+		info = append(info, yaml.MapItem{"basePath", m.BasePath})
+	}
+	if len(m.Schemes) != 0 {
+		info = append(info, yaml.MapItem{"schemes", m.Schemes})
+	}
+	if len(m.Consumes) != 0 {
+		info = append(info, yaml.MapItem{"consumes", m.Consumes})
+	}
+	if len(m.Produces) != 0 {
+		info = append(info, yaml.MapItem{"produces", m.Produces})
+	}
+	if m.Paths != nil {
+		info = append(info, yaml.MapItem{"paths", m.Paths.ToRawInfo()})
+	}
+	// &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Definitions != nil {
+		info = append(info, yaml.MapItem{"definitions", m.Definitions.ToRawInfo()})
+	}
+	// &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Parameters != nil {
+		info = append(info, yaml.MapItem{"parameters", m.Parameters.ToRawInfo()})
+	}
+	// &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Responses != nil {
+		info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()})
+	}
+	// &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Security) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Security {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{"security", items})
+	}
+	// &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.SecurityDefinitions != nil {
+		info = append(info, yaml.MapItem{"securityDefinitions", m.SecurityDefinitions.ToRawInfo()})
+	}
+	// &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Tags) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Tags {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{"tags", items})
+	}
+	// &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Examples suitable for JSON or YAML export.
+func (m *Examples) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export.
+func (m *ExternalDocs) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.Url != "" {
+		info = append(info, yaml.MapItem{"url", m.Url})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export.
+func (m *FileSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{"format", m.Format})
+	}
+	if m.Title != "" {
+		info = append(info, yaml.MapItem{"title", m.Title})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Required) != 0 {
+		info = append(info, yaml.MapItem{"required", m.Required})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{"type", m.Type})
+	}
+	if m.ReadOnly != false {
+		info = append(info, yaml.MapItem{"readOnly", m.ReadOnly})
+	}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Example != nil {
+		info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()})
+	}
+	// &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export.
+func (m *FormDataParameterSubSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Required != false {
+		info = append(info, yaml.MapItem{"required", m.Required})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{"in", m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	if m.AllowEmptyValue != false {
+		info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{"type", m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{"format", m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{"maximum", m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{"minimum", m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{"maxLength", m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{"minLength", m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{"pattern", m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{"maxItems", m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{"minItems", m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{"enum", items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Header suitable for JSON or YAML export.
+func (m *Header) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{"type", m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{"format", m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{"maximum", m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{"minimum", m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{"maxLength", m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{"minLength", m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{"pattern", m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{"maxItems", m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{"minItems", m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{"enum", items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export.
+func (m *HeaderParameterSubSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Required != false {
+		info = append(info, yaml.MapItem{"required", m.Required})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{"in", m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{"type", m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{"format", m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{"maximum", m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{"minimum", m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{"maxLength", m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{"minLength", m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{"pattern", m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{"maxItems", m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{"minItems", m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{"enum", items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Headers suitable for JSON or YAML export.
+func (m *Headers) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Info suitable for JSON or YAML export.
+func (m *Info) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Title != "" {
+		info = append(info, yaml.MapItem{"title", m.Title})
+	}
+	if m.Version != "" {
+		info = append(info, yaml.MapItem{"version", m.Version})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.TermsOfService != "" {
+		info = append(info, yaml.MapItem{"termsOfService", m.TermsOfService})
+	}
+	if m.Contact != nil {
+		info = append(info, yaml.MapItem{"contact", m.Contact.ToRawInfo()})
+	}
+	// &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.License != nil {
+		info = append(info, yaml.MapItem{"license", m.License.ToRawInfo()})
+	}
+	// &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export.
+func (m *ItemsItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if len(m.Schema) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Schema {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{"schema", items})
+	}
+	// &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	return info
+}
+
+// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export.
+func (m *JsonReference) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.XRef != "" {
+		info = append(info, yaml.MapItem{"$ref", m.XRef})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	return info
+}
+
+// ToRawInfo returns a description of License suitable for JSON or YAML export.
+func (m *License) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	if m.Url != "" {
+		info = append(info, yaml.MapItem{"url", m.Url})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export.
+func (m *NamedAny) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	// &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export.
+func (m *NamedHeader) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	// &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export.
+func (m *NamedParameter) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	// &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export.
+func (m *NamedPathItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	// &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export.
+func (m *NamedResponse) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	// &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export.
+func (m *NamedResponseValue) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	// &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export.
+func (m *NamedSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	// &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export.
+func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	// &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedString suitable for JSON or YAML export.
+func (m *NamedString) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	if m.Value != "" {
+		info = append(info, yaml.MapItem{"value", m.Value})
+	}
+	return info
+}
+
+// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export.
+func (m *NamedStringArray) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{"name", m.Name})
+	}
+	// &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export.
+func (m *NonBodyParameter) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// NonBodyParameter
+	// {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetHeaderParameterSubSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetFormDataParameterSubSchema()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	// {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v2 := m.GetQueryParameterSubSchema()
+	if v2 != nil {
+		return v2.ToRawInfo()
+	}
+	// {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v3 := m.GetPathParameterSubSchema()
+	if v3 != nil {
+		return v3.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export.
+func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{"type", m.Type})
+	}
+	if m.Flow != "" {
+		info = append(info, yaml.MapItem{"flow", m.Flow})
+	}
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.AuthorizationUrl != "" {
+		info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl})
+	}
+	if m.TokenUrl != "" {
+		info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export.
+func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{"type", m.Type})
+	}
+	if m.Flow != "" {
+		info = append(info, yaml.MapItem{"flow", m.Flow})
+	}
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.TokenUrl != "" {
+		info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export.
+func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{"type", m.Type})
+	}
+	if m.Flow != "" {
+		info = append(info, yaml.MapItem{"flow", m.Flow})
+	}
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.AuthorizationUrl != "" {
+		info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export.
+func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{"type", m.Type})
+	}
+	if m.Flow != "" {
+		info = append(info, yaml.MapItem{"flow", m.Flow})
+	}
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.TokenUrl != "" {
+		info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export.
+func (m *Oauth2Scopes) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	// &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Operation suitable for JSON or YAML export.
+func (m *Operation) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if len(m.Tags) != 0 {
+		info = append(info, yaml.MapItem{"tags", m.Tags})
+	}
+	if m.Summary != "" {
+		info = append(info, yaml.MapItem{"summary", m.Summary})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{"description", m.Description})
+	}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.OperationId != "" {
+		info = append(info, yaml.MapItem{"operationId", m.OperationId})
+	}
+	if len(m.Produces) != 0 {
+		info = append(info, yaml.MapItem{"produces", m.Produces})
+	}
+	if len(m.Consumes) != 0 {
+		info = append(info, yaml.MapItem{"consumes", m.Consumes})
+	}
+	if len(m.Parameters) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Parameters {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{"parameters", items})
+	}
+	// &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.}
+	if m.Responses != nil {
+		info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()})
+	}
+	// &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Schemes) != 0 {
+		info = append(info, yaml.MapItem{"schemes", m.Schemes})
+	}
+	if m.Deprecated != false {
+		info = append(info, yaml.MapItem{"deprecated", m.Deprecated})
+	}
+	if len(m.Security) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Security {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{"security", items})
+	}
+	// &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Parameter suitable for JSON or YAML export.
+func (m *Parameter) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// Parameter
+	// {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetBodyParameter()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetNonBodyParameter()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export.
+func (m *ParameterDefinitions) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export.
+func (m *ParametersItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ParametersItem + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. +func (m *PathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.XRef != "" { + info = append(info, yaml.MapItem{"$ref", m.XRef}) + } + if m.Get != nil { + info = append(info, yaml.MapItem{"get", m.Get.ToRawInfo()}) + } + // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Put != nil { + info = append(info, yaml.MapItem{"put", m.Put.ToRawInfo()}) + } + // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Post != nil { + info = append(info, yaml.MapItem{"post", m.Post.ToRawInfo()}) + } + // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Delete != nil { + info = append(info, yaml.MapItem{"delete", m.Delete.ToRawInfo()}) + } + // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Options != nil { + info = append(info, yaml.MapItem{"options", m.Options.ToRawInfo()}) + } + // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Head != nil { + info = append(info, yaml.MapItem{"head", m.Head.ToRawInfo()}) + } + // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Patch != nil { + info = append(info, yaml.MapItem{"patch", m.Patch.ToRawInfo()}) + } + // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"parameters", items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. 
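+// For path parameters the swagger v2 spec pins required to true, so the
+// required guard below fires for every valid document; the remaining fields
+// follow the same omit-zero-value pattern as the other parameter sub-schemas.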
+func (m *PathParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Paths suitable for JSON or YAML export. +func (m *Paths) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + if m.Path != nil { + for _, item := range m.Path { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. 
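+// The numeric guards below compare against Go zero values, a consequence of
+// proto3 scalars not distinguishing "unset" from "zero": a bound explicitly
+// set to 0 (for example Minimum) is indistinguishable from an absent one and
+// is therefore dropped from the output.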
+func (m *PrimitivesItems) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Properties suitable for JSON or YAML export. +func (m *Properties) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. 
+func (m *QueryParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Response suitable for JSON or YAML export. 
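+// The schema, headers, and examples fields delegate to their own ToRawInfo
+// implementations, so nested structures serialize recursively; description
+// and any vendor extensions are written inline.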
+func (m *Response) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Schema != nil { + info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + } + // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Headers != nil { + info = append(info, yaml.MapItem{"headers", m.Headers.ToRawInfo()}) + } + // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Examples != nil { + info = append(info, yaml.MapItem{"examples", m.Examples.ToRawInfo()}) + } + // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. +func (m *ResponseDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. +func (m *ResponseValue) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ResponseValue + // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetResponse() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Responses suitable for JSON or YAML export. +func (m *Responses) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.ResponseCode != nil { + for _, item := range m.ResponseCode { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Schema suitable for JSON or YAML export. 
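+// A minimal round-trip sketch (hypothetical caller; yaml is gopkg.in/yaml.v2,
+// whose Marshal keeps yaml.MapSlice keys in insertion order):
+//
+//	s := &Schema{Title: "Pet", Required: []string{"name"}}
+//	out, err := yaml.Marshal(s.ToRawInfo())
+//	// on success out is "title: Pet\nrequired:\n- name\n"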
+func (m *Schema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.XRef != "" { + info = append(info, yaml.MapItem{"$ref", m.XRef}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Title != "" { + info = append(info, yaml.MapItem{"title", m.Title}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if m.MaxProperties != 0 { + info = append(info, yaml.MapItem{"maxProperties", m.MaxProperties}) + } + if m.MinProperties != 0 { + info = append(info, yaml.MapItem{"minProperties", m.MinProperties}) + } + if len(m.Required) != 0 { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.AdditionalProperties != nil { + info = append(info, yaml.MapItem{"additionalProperties", m.AdditionalProperties.ToRawInfo()}) + } + // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Type != nil { + if len(m.Type.Value) == 1 { + info = append(info, yaml.MapItem{"type", m.Type.Value[0]}) + } else { + info = append(info, yaml.MapItem{"type", m.Type.Value}) + } + } + // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Items != nil { + items := make([]interface{}, 0) + for _, item := range m.Items.Schema { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"items", items[0]}) + } + // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.AllOf) != 0 { + items := make([]interface{}, 0) + for _, item := range m.AllOf { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"allOf", items}) + } + // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.Properties != nil { + info = append(info, yaml.MapItem{"properties", m.Properties.ToRawInfo()}) + } 
+ // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Discriminator != "" { + info = append(info, yaml.MapItem{"discriminator", m.Discriminator}) + } + if m.ReadOnly != false { + info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + } + if m.Xml != nil { + info = append(info, yaml.MapItem{"xml", m.Xml.ToRawInfo()}) + } + // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Example != nil { + info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + } + // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. +func (m *SchemaItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SchemaItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFileSchema() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. +func (m *SecurityDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. 
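+// Like the other ONE OF wrappers, this tries each getter in declaration order
+// and returns the raw info of the first non-nil variant, covering all six
+// swagger security scheme shapes (basic, apiKey, and the four oauth2 flows);
+// nil means no variant was set.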
+func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SecurityDefinitionsItem + // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBasicAuthenticationSecurity() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetApiKeySecurity() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetOauth2ImplicitSecurity() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetOauth2PasswordSecurity() + if v3 != nil { + return v3.ToRawInfo() + } + // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v4 := m.GetOauth2ApplicationSecurity() + if v4 != nil { + return v4.ToRawInfo() + } + // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v5 := m.GetOauth2AccessCodeSecurity() + if v5 != nil { + return v5.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. +func (m *SecurityRequirement) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. +func (m *StringArray) ToRawInfo() interface{} { + return m.Value +} + +// ToRawInfo returns a description of Tag suitable for JSON or YAML export. +func (m *Tag) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. +func (m *TypeItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if len(m.Value) != 0 { + info = append(info, yaml.MapItem{"value", m.Value}) + } + return info +} + +// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. 
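+// Unlike the typed maps above, a VendorExtension carries arbitrary key/value
+// pairs, and its entries are flattened directly into the enclosing document.
+// The compiled regexps at the end of this file (pattern0 through pattern2)
+// mirror the Pattern annotations in the generator comments, e.g. ^x- for
+// vendor extension keys.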
+func (m *VendorExtension) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Xml suitable for JSON or YAML export. +func (m *Xml) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Namespace != "" { + info = append(info, yaml.MapItem{"namespace", m.Namespace}) + } + if m.Prefix != "" { + info = append(info, yaml.MapItem{"prefix", m.Prefix}) + } + if m.Attribute != false { + info = append(info, yaml.MapItem{"attribute", m.Attribute}) + } + if m.Wrapped != false { + info = append(info, yaml.MapItem{"wrapped", m.Wrapped}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +var ( + pattern0 = regexp.MustCompile("^x-") + pattern1 = regexp.MustCompile("^/") + pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") +) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go new file mode 100644 index 000000000000..37da7df256f9 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -0,0 +1,4456 @@ +// Code generated by protoc-gen-go. +// source: OpenAPIv2/OpenAPIv2.proto +// DO NOT EDIT! + +/* +Package openapi_v2 is a generated protocol buffer package. + +It is generated from these files: + OpenAPIv2/OpenAPIv2.proto + +It has these top-level messages: + AdditionalPropertiesItem + Any + ApiKeySecurity + BasicAuthenticationSecurity + BodyParameter + Contact + Default + Definitions + Document + Examples + ExternalDocs + FileSchema + FormDataParameterSubSchema + Header + HeaderParameterSubSchema + Headers + Info + ItemsItem + JsonReference + License + NamedAny + NamedHeader + NamedParameter + NamedPathItem + NamedResponse + NamedResponseValue + NamedSchema + NamedSecurityDefinitionsItem + NamedString + NamedStringArray + NonBodyParameter + Oauth2AccessCodeSecurity + Oauth2ApplicationSecurity + Oauth2ImplicitSecurity + Oauth2PasswordSecurity + Oauth2Scopes + Operation + Parameter + ParameterDefinitions + ParametersItem + PathItem + PathParameterSubSchema + Paths + PrimitivesItems + Properties + QueryParameterSubSchema + Response + ResponseDefinitions + ResponseValue + Responses + Schema + SchemaItem + SecurityDefinitions + SecurityDefinitionsItem + SecurityRequirement + StringArray + Tag + TypeItem + VendorExtension + Xml +*/ +package openapi_v2 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AdditionalPropertiesItem struct { + // Types that are valid to be assigned to Oneof: + // *AdditionalPropertiesItem_Schema + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } +func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } +func (*AdditionalPropertiesItem) ProtoMessage() {} +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` +} + +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *AdditionalPropertiesItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } +} + +func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *AdditionalPropertiesItem_Boolean: + t := uint64(0) + if x.Boolean { + t = 1 + } + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AdditionalPropertiesItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &AdditionalPropertiesItem_Schema{msg} + return true, err + case 2: // oneof.boolean + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} + return true, err + default: + return false, nil + } +} + +func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { + m := 
msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: + s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AdditionalPropertiesItem_Boolean: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Any struct { + Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Any) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +func (m *Any) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +type ApiKeySecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } +func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } +func (*ApiKeySecurity) ProtoMessage() {} +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ApiKeySecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ApiKeySecurity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ApiKeySecurity) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *ApiKeySecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BasicAuthenticationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } +func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } +func (*BasicAuthenticationSecurity) ProtoMessage() {} +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *BasicAuthenticationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BodyParameter struct { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
+ Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BodyParameter) Reset() { *m = BodyParameter{} } +func (m *BodyParameter) String() string { return proto.CompactTextString(m) } +func (*BodyParameter) ProtoMessage() {} +func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *BodyParameter) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BodyParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BodyParameter) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *BodyParameter) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *BodyParameter) GetSchema() *Schema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *BodyParameter) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Contact information for the owners of the API. +type Contact struct { + // The identifying name of the contact person/organization. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the contact information. + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + // The email address of the contact person/organization. + Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Contact) Reset() { *m = Contact{} } +func (m *Contact) String() string { return proto.CompactTextString(m) } +func (*Contact) ProtoMessage() {} +func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Contact) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Contact) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *Contact) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *Contact) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Default struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Default) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. 
+type Definitions struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Definitions) Reset() { *m = Definitions{} } +func (m *Definitions) String() string { return proto.CompactTextString(m) } +func (*Definitions) ProtoMessage() {} +func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Definitions) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Document struct { + // The Swagger version of this document. + Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + // The host (name or ip) of the API. Example: 'swagger.io' + Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` + // The base path to the API. Example: '/api'. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` + // A list of MIME types accepted by the API. + Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *Document) GetSwagger() string { + if m != nil { + return m.Swagger + } + return "" +} + +func (m *Document) GetInfo() *Info { + if m != nil { + return m.Info + } + return nil +} + +func (m *Document) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Document) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +func (m *Document) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Document) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Document) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Document) GetPaths() *Paths { + if m != nil { + return m.Paths + } + return nil +} + +func (m *Document) GetDefinitions() *Definitions { + if m != nil { + return m.Definitions + } 
+ return nil +} + +func (m *Document) GetParameters() *ParameterDefinitions { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Document) GetResponses() *ResponseDefinitions { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Document) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { + if m != nil { + return m.SecurityDefinitions + } + return nil +} + +func (m *Document) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Document) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Document) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Examples struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Examples) Reset() { *m = Examples{} } +func (m *Examples) String() string { return proto.CompactTextString(m) } +func (*Examples) ProtoMessage() {} +func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *Examples) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// information about external documentation +type ExternalDocs struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } +func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } +func (*ExternalDocs) ProtoMessage() {} +func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ExternalDocs) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ExternalDocs) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ExternalDocs) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. 
+type FileSchema struct { + Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FileSchema) Reset() { *m = FileSchema{} } +func (m *FileSchema) String() string { return proto.CompactTextString(m) } +func (*FileSchema) ProtoMessage() {} +func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *FileSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FileSchema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *FileSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FileSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FileSchema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *FileSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FileSchema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +func (m *FileSchema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *FileSchema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *FileSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type FormDataParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. 
+ AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } +func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*FormDataParameterSubSchema) ProtoMessage() {} +func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *FormDataParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *FormDataParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FormDataParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *FormDataParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FormDataParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *FormDataParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 
+} + +func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *FormDataParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *FormDataParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Header struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *Header) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Header) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Header) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *Header) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *Header) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Header) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Header) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Header) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Header) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Header) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Header) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Header) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Header) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Header) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Header) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Header) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Header) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Header) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Header) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type HeaderParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } +func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*HeaderParameterSubSchema) ProtoMessage() {} +func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *HeaderParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *HeaderParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *HeaderParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *HeaderParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *HeaderParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *HeaderParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMinimum() float64 { + if m != nil { + 
return m.Minimum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *HeaderParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *HeaderParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Headers struct { + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Headers) Reset() { *m = Headers{} } +func (m *Headers) String() string { return proto.CompactTextString(m) } +func (*Headers) ProtoMessage() {} +func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *Headers) GetAdditionalProperties() []*NamedHeader { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// General information about the API. +type Info struct { + // A unique and precise title of the API. + Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` + // A semantic version number of the API. + Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The terms of service for the API. 
+ TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *Info) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Info) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Info) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Info) GetTermsOfService() string { + if m != nil { + return m.TermsOfService + } + return "" +} + +func (m *Info) GetContact() *Contact { + if m != nil { + return m.Contact + } + return nil +} + +func (m *Info) GetLicense() *License { + if m != nil { + return m.License + } + return nil +} + +func (m *Info) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type ItemsItem struct { + Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"` +} + +func (m *ItemsItem) Reset() { *m = ItemsItem{} } +func (m *ItemsItem) String() string { return proto.CompactTextString(m) } +func (*ItemsItem) ProtoMessage() {} +func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *ItemsItem) GetSchema() []*Schema { + if m != nil { + return m.Schema + } + return nil +} + +type JsonReference struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *JsonReference) Reset() { *m = JsonReference{} } +func (m *JsonReference) String() string { return proto.CompactTextString(m) } +func (*JsonReference) ProtoMessage() {} +func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *JsonReference) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *JsonReference) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type License struct { + // The name of the license type. It's encouraged to use an OSI compatible license. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the license. 
+ Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *License) Reset() { *m = License{} } +func (m *License) String() string { return proto.CompactTextString(m) } +func (*License) ProtoMessage() {} +func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *License) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *License) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *License) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +type NamedAny struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedAny) Reset() { *m = NamedAny{} } +func (m *NamedAny) String() string { return proto.CompactTextString(m) } +func (*NamedAny) ProtoMessage() {} +func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *NamedAny) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedAny) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +type NamedHeader struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedHeader) Reset() { *m = NamedHeader{} } +func (m *NamedHeader) String() string { return proto.CompactTextString(m) } +func (*NamedHeader) ProtoMessage() {} +func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *NamedHeader) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedHeader) GetValue() *Header { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +type NamedParameter struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedParameter) Reset() { *m = NamedParameter{} } +func (m *NamedParameter) String() string { return proto.CompactTextString(m) } +func (*NamedParameter) ProtoMessage() {} +func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *NamedParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedParameter) GetValue() *Parameter { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+type NamedPathItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } +func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } +func (*NamedPathItem) ProtoMessage() {} +func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *NamedPathItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedPathItem) GetValue() *PathItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +type NamedResponse struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponse) Reset() { *m = NamedResponse{} } +func (m *NamedResponse) String() string { return proto.CompactTextString(m) } +func (*NamedResponse) ProtoMessage() {} +func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *NamedResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponse) GetValue() *Response { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +type NamedResponseValue struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } +func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } +func (*NamedResponseValue) ProtoMessage() {} +func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *NamedResponseValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponseValue) GetValue() *ResponseValue { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +type NamedSchema struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSchema) Reset() { *m = NamedSchema{} } +func (m *NamedSchema) String() string { return proto.CompactTextString(m) } +func (*NamedSchema) ProtoMessage() {} +func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *NamedSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSchema) GetValue() *Schema { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
+type NamedSecurityDefinitionsItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } +func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *NamedSecurityDefinitionsItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +type NamedString struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedString) Reset() { *m = NamedString{} } +func (m *NamedString) String() string { return proto.CompactTextString(m) } +func (*NamedString) ProtoMessage() {} +func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *NamedString) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +type NamedStringArray struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } +func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } +func (*NamedStringArray) ProtoMessage() {} +func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *NamedStringArray) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedStringArray) GetValue() *StringArray { + if m != nil { + return m.Value + } + return nil +} + +type NonBodyParameter struct { + // Types that are valid to be assigned to Oneof: + // *NonBodyParameter_HeaderParameterSubSchema + // *NonBodyParameter_FormDataParameterSubSchema + // *NonBodyParameter_QueryParameterSubSchema + // *NonBodyParameter_PathParameterSubSchema + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } +func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } +func (*NonBodyParameter) ProtoMessage() {} +func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() +} + +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"` +} +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"` +} +type 
NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"` +} +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"` +} + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { + return x.HeaderParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { + return x.FormDataParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { + return x.QueryParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { + return x.PathParameterSubSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } +} + +func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_FormDataParameterSubSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_QueryParameterSubSchema: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_PathParameterSubSchema: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NonBodyParameter) + switch tag { + case 1: // oneof.header_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HeaderParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg} + return true, err + case 2: // oneof.form_data_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FormDataParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg} + return true, err + case 3: // oneof.query_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg} + return true, err + case 4: // oneof.path_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PathParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg} + return true, err + default: + return false, nil + } +} + +func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + s := proto.Size(x.HeaderParameterSubSchema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_FormDataParameterSubSchema: + s := proto.Size(x.FormDataParameterSubSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_QueryParameterSubSchema: + s := proto.Size(x.QueryParameterSubSchema) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*NonBodyParameter_PathParameterSubSchema: + s := proto.Size(x.PathParameterSubSchema) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oauth2AccessCodeSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } +func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *Oauth2AccessCodeSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ApplicationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } +func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ApplicationSecurity) ProtoMessage() {} +func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *Oauth2ApplicationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} 
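
// The _NonBodyParameter_OneofMarshaler and _NonBodyParameter_OneofSizer
// functions above both compute each variant's key with the expression
// fieldNumber<<3 | proto.WireBytes, i.e. the standard protobuf tag
// (field_number << 3) | wire_type. A minimal self-contained sketch of that
// arithmetic, assuming only that proto.WireBytes is the length-delimited
// wire type 2; the names below are local stand-ins, not part of this patch:

package main

import "fmt"

const wireBytes = 2 // matches proto.WireBytes: length-delimited encoding

// oneofKey reproduces the n<<3 | proto.WireBytes expression used above.
func oneofKey(fieldNumber uint64) uint64 {
	return fieldNumber<<3 | wireBytes
}

func main() {
	// The four NonBodyParameter oneof variants occupy fields 1 through 4.
	for field := uint64(1); field <= 4; field++ {
		fmt.Printf("field %d encodes key 0x%02x\n", field, oneofKey(field))
	}
	// Prints keys 0x0a, 0x12, 0x1a, 0x22 -- the varints the marshaler
	// emits before each length-delimited sub-schema message.
}
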
+ +func (m *Oauth2ApplicationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ImplicitSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } +func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ImplicitSecurity) ProtoMessage() {} +func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *Oauth2ImplicitSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2PasswordSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } +func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2PasswordSecurity) ProtoMessage() {} +func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *Oauth2PasswordSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2PasswordSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2Scopes struct { + 
AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } +func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } +func (*Oauth2Scopes) ProtoMessage() {} +func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Operation struct { + Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` + // A brief summary of the operation. + Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + // A longer description of the operation, GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + // A unique identifier of the operation. + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` + // A list of MIME types the API can consume. + Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` + // The parameters needed to send a valid API call. + Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` + Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *Operation) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Operation) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Operation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Operation) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Operation) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *Operation) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Operation) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Operation) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Operation) GetResponses() *Responses { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Operation) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Operation) GetDeprecated() bool { + if m != nil { + 
return m.Deprecated + } + return false +} + +func (m *Operation) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Operation) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Parameter struct { + // Types that are valid to be assigned to Oneof: + // *Parameter_BodyParameter + // *Parameter_NonBodyParameter + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *Parameter) Reset() { *m = Parameter{} } +func (m *Parameter) String() string { return proto.CompactTextString(m) } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type isParameter_Oneof interface { + isParameter_Oneof() +} + +type Parameter_BodyParameter struct { + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` +} +type Parameter_NonBodyParameter struct { + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` +} + +func (*Parameter_BodyParameter) isParameter_Oneof() {} +func (*Parameter_NonBodyParameter) isParameter_Oneof() {} + +func (m *Parameter) GetOneof() isParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *Parameter) GetBodyParameter() *BodyParameter { + if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { + return x.BodyParameter + } + return nil +} + +func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { + if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { + return x.NonBodyParameter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } +} + +func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BodyParameter); err != nil { + return err + } + case *Parameter_NonBodyParameter: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NonBodyParameter); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Parameter) + switch tag { + case 1: // oneof.body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_BodyParameter{msg} + return true, err + case 2: // oneof.non_body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NonBodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_NonBodyParameter{msg} + return true, err + default: + return false, nil + } +} + +func _Parameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + s := proto.Size(x.BodyParameter) + n += proto.SizeVarint(1<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Parameter_NonBodyParameter: + s := proto.Size(x.NonBodyParameter) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// One or more JSON representations for parameters +type ParameterDefinitions struct { + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } +func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } +func (*ParameterDefinitions) ProtoMessage() {} +func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ParametersItem struct { + // Types that are valid to be assigned to Oneof: + // *ParametersItem_Parameter + // *ParametersItem_JsonReference + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ParametersItem) Reset() { *m = ParametersItem{} } +func (m *ParametersItem) String() string { return proto.CompactTextString(m) } +func (*ParametersItem) ProtoMessage() {} +func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type isParametersItem_Oneof interface { + isParametersItem_Oneof() +} + +type ParametersItem_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` +} +type ParametersItem_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} +func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} + +func (m *ParametersItem) GetOneof() isParametersItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ParametersItem) GetParameter() *Parameter { + if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { + return x.Parameter + } + return nil +} + +func (m *ParametersItem) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } +} + +func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Parameter); err != nil { + return err + } + case *ParametersItem_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ParametersItem) + switch tag { + case 1: // oneof.parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Parameter) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_Parameter{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ParametersItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + s := proto.Size(x.Parameter) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ParametersItem_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type PathItem struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` + // The parameters needed to send a valid API call. 
+ Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathItem) Reset() { *m = PathItem{} } +func (m *PathItem) String() string { return proto.CompactTextString(m) } +func (*PathItem) ProtoMessage() {} +func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *PathItem) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *PathItem) GetGet() *Operation { + if m != nil { + return m.Get + } + return nil +} + +func (m *PathItem) GetPut() *Operation { + if m != nil { + return m.Put + } + return nil +} + +func (m *PathItem) GetPost() *Operation { + if m != nil { + return m.Post + } + return nil +} + +func (m *PathItem) GetDelete() *Operation { + if m != nil { + return m.Delete + } + return nil +} + +func (m *PathItem) GetOptions() *Operation { + if m != nil { + return m.Options + } + return nil +} + +func (m *PathItem) GetHead() *Operation { + if m != nil { + return m.Head + } + return nil +} + +func (m *PathItem) GetPatch() *Operation { + if m != nil { + return m.Patch + } + return nil +} + +func (m *PathItem) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *PathItem) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type PathParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } +func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*PathParameterSubSchema) ProtoMessage() {} +func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *PathParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *PathParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *PathParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PathParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PathParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PathParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PathParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PathParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PathParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PathParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + 
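
// Every accessor in this file, like GetMinimum above, guards against a nil
// receiver so callers can chain getters on unset sub-messages without nil
// checks. A minimal self-contained sketch of the convention; the local Sub
// type below is a hypothetical stand-in for the generated messages, not
// part of this patch:

package main

import "fmt"

type Sub struct {
	Minimum float64
}

// GetMinimum follows the generated pattern: a nil receiver yields the
// field's zero value instead of a nil-pointer panic.
func (m *Sub) GetMinimum() float64 {
	if m != nil {
		return m.Minimum
	}
	return 0
}

func main() {
	var missing *Sub // e.g. an optional sub-message that was never set
	fmt.Println(missing.GetMinimum()) // prints 0, no panic
}
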
+func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PathParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PathParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PathParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PathParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +type Paths struct { + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` +} + +func (m *Paths) Reset() { *m = Paths{} } +func (m *Paths) String() string { return proto.CompactTextString(m) } +func (*Paths) ProtoMessage() {} +func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func (m *Paths) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func (m *Paths) GetPath() []*NamedPathItem { + if m != nil { + return m.Path + } + return nil +} + +type PrimitivesItems struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } +func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } +func (*PrimitivesItems) ProtoMessage() {} +func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + +func (m *PrimitivesItems) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PrimitivesItems) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PrimitivesItems) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PrimitivesItems) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PrimitivesItems) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PrimitivesItems) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PrimitivesItems) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PrimitivesItems) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PrimitivesItems) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PrimitivesItems) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PrimitivesItems) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PrimitivesItems) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PrimitivesItems) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PrimitivesItems) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PrimitivesItems) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Properties struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Properties) Reset() { *m = Properties{} } +func (m *Properties) String() string { return proto.CompactTextString(m) } +func (*Properties) ProtoMessage() {} +func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } + +func (m *Properties) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type QueryParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } +func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*QueryParameterSubSchema) ProtoMessage() {} +func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + +func (m *QueryParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *QueryParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *QueryParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *QueryParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *QueryParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *QueryParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *QueryParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + 
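
// The repeated Named* messages (NamedAny, NamedString, ...) behind fields
// such as vendor_extension above are, per their own doc comments, used to
// represent maps as ordered (name,value) pairs. A minimal sketch of
// building such an ordered list from a Go map, assuming deterministic key
// order is the goal; the local NamedString mirrors, but is not, the
// generated type:

package main

import (
	"fmt"
	"sort"
)

type NamedString struct {
	Name  string // map key
	Value string // mapped value
}

func main() {
	scopes := map[string]string{"write": "modify pets", "read": "view pets"}

	// Sort the keys first; iterating a Go map directly is randomized.
	keys := make([]string, 0, len(scopes))
	for k := range scopes {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	ordered := make([]*NamedString, 0, len(keys))
	for _, k := range keys {
		ordered = append(ordered, &NamedString{Name: k, Value: scopes[k]})
	}
	for _, p := range ordered {
		fmt.Printf("%s: %s\n", p.Name, p.Value)
	}
}
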
+func (m *QueryParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *QueryParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *QueryParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *QueryParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *QueryParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *QueryParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *QueryParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Response struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } + +func (m *Response) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Response) GetSchema() *SchemaItem { + if m != nil { + return m.Schema + } + return nil +} + +func (m *Response) GetHeaders() *Headers { + if m != nil { + return m.Headers + } + return nil +} + +func (m *Response) GetExamples() *Examples { + if m != nil { + return m.Examples + } + return nil +} + +func (m *Response) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// One or more JSON representations for responses +type ResponseDefinitions struct { + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } +func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } +func (*ResponseDefinitions) ProtoMessage() {} +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } + +func (m *ResponseDefinitions) GetAdditionalProperties() 
[]*NamedResponse { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ResponseValue struct { + // Types that are valid to be assigned to Oneof: + // *ResponseValue_Response + // *ResponseValue_JsonReference + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ResponseValue) Reset() { *m = ResponseValue{} } +func (m *ResponseValue) String() string { return proto.CompactTextString(m) } +func (*ResponseValue) ProtoMessage() {} +func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } + +type isResponseValue_Oneof interface { + isResponseValue_Oneof() +} + +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` +} +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ResponseValue_Response) isResponseValue_Oneof() {} +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +func (m *ResponseValue) GetOneof() isResponseValue_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ResponseValue) GetResponse() *Response { + if x, ok := m.GetOneof().(*ResponseValue_Response); ok { + return x.Response + } + return nil +} + +func (m *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } +} + +func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Response); err != nil { + return err + } + case *ResponseValue_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) + } + return nil +} + +func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseValue) + switch tag { + case 1: // oneof.response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Response) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_Response{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ResponseValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + s := proto.Size(x.Response) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseValue_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Response objects names can either be any valid HTTP status code or 'default'. +type Responses struct { + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Responses) Reset() { *m = Responses{} } +func (m *Responses) String() string { return proto.CompactTextString(m) } +func (*Responses) ProtoMessage() {} +func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } + +func (m *Responses) GetResponseCode() []*NamedResponseValue { + if m != nil { + return m.ResponseCode + } + return nil +} + +func (m *Responses) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type Schema struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` + Discriminator string 
`protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + +func (m *Schema) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *Schema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Schema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Schema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Schema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Schema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Schema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Schema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Schema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Schema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Schema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Schema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Schema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Schema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Schema) GetMaxProperties() int64 { + if m != nil { + return m.MaxProperties + } + return 0 +} + +func (m *Schema) GetMinProperties() int64 { + if m != nil { + return m.MinProperties + } + return 0 +} + +func (m *Schema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *Schema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +func (m *Schema) GetType() *TypeItem { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema) GetItems() *ItemsItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *Schema) GetAllOf() []*Schema { + if m != nil { + return m.AllOf + } + return nil +} + +func (m *Schema) GetProperties() *Properties { + if m != nil { + return m.Properties + } + return nil +} + +func (m *Schema) GetDiscriminator() string { + if m != nil { + return m.Discriminator + } + return "" +} + +func (m *Schema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + 
+func (m *Schema) GetXml() *Xml { + if m != nil { + return m.Xml + } + return nil +} + +func (m *Schema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Schema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *Schema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type SchemaItem struct { + // Types that are valid to be assigned to Oneof: + // *SchemaItem_Schema + // *SchemaItem_FileSchema + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SchemaItem) Reset() { *m = SchemaItem{} } +func (m *SchemaItem) String() string { return proto.CompactTextString(m) } +func (*SchemaItem) ProtoMessage() {} +func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } + +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() +} + +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` +} + +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SchemaItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { + return x.FileSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } +} + +func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *SchemaItem_FileSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SchemaItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_Schema{msg} + return true, err + case 2: // oneof.file_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileSchema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_FileSchema{msg} + return true, err + default: + return false, nil + } +} + +func _SchemaItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*SchemaItem_FileSchema: + s := proto.Size(x.FileSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityDefinitions struct { + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } +func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitions) ProtoMessage() {} +func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } + +func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type SecurityDefinitionsItem struct { + // Types that are valid to be assigned to Oneof: + // *SecurityDefinitionsItem_BasicAuthenticationSecurity + // *SecurityDefinitionsItem_ApiKeySecurity + // *SecurityDefinitionsItem_Oauth2ImplicitSecurity + // *SecurityDefinitionsItem_Oauth2PasswordSecurity + // *SecurityDefinitionsItem_Oauth2ApplicationSecurity + // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } +func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitionsItem) ProtoMessage() {} +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } + +type isSecurityDefinitionsItem_Oneof interface { + isSecurityDefinitionsItem_Oneof() +} + +type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { + BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` +} +type SecurityDefinitionsItem_ApiKeySecurity struct { + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` +} + +func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} +func 
(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } +} + +func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_ApiKeySecurity: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + b.EncodeVarint(6<<3 | 
proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SecurityDefinitionsItem) + switch tag { + case 1: // oneof.basic_authentication_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BasicAuthenticationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} + return true, err + case 2: // oneof.api_key_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ApiKeySecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} + return true, err + case 3: // oneof.oauth2_implicit_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ImplicitSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} + return true, err + case 4: // oneof.oauth2_password_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2PasswordSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} + return true, err + case 5: // oneof.oauth2_application_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ApplicationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} + return true, err + case 6: // oneof.oauth2_access_code_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2AccessCodeSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} + return true, err + default: + return false, nil + } +} + +func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + s := proto.Size(x.BasicAuthenticationSecurity) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_ApiKeySecurity: + s := proto.Size(x.ApiKeySecurity) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + s := proto.Size(x.Oauth2ImplicitSecurity) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + s := proto.Size(x.Oauth2PasswordSecurity) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + s := proto.Size(x.Oauth2ApplicationSecurity) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + s := proto.Size(x.Oauth2AccessCodeSecurity) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityRequirement struct { + 
AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } +func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } +func (*SecurityRequirement) ProtoMessage() {} +func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } + +func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type StringArray struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *StringArray) Reset() { *m = StringArray{} } +func (m *StringArray) String() string { return proto.CompactTextString(m) } +func (*StringArray) ProtoMessage() {} +func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } + +func (m *StringArray) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Tag struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } + +func (m *Tag) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Tag) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Tag) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Tag) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type TypeItem struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *TypeItem) Reset() { *m = TypeItem{} } +func (m *TypeItem) String() string { return proto.CompactTextString(m) } +func (*TypeItem) ProtoMessage() {} +func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } + +func (m *TypeItem) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +// Any property starting with x- is valid. 
+type VendorExtension struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *VendorExtension) Reset() { *m = VendorExtension{} } +func (m *VendorExtension) String() string { return proto.CompactTextString(m) } +func (*VendorExtension) ProtoMessage() {} +func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } + +func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Xml struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Xml) Reset() { *m = Xml{} } +func (m *Xml) String() string { return proto.CompactTextString(m) } +func (*Xml) ProtoMessage() {} +func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } + +func (m *Xml) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Xml) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *Xml) GetPrefix() string { + if m != nil { + return m.Prefix + } + return "" +} + +func (m *Xml) GetAttribute() bool { + if m != nil { + return m.Attribute + } + return false +} + +func (m *Xml) GetWrapped() bool { + if m != nil { + return m.Wrapped + } + return false +} + +func (m *Xml) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func init() { + proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") + proto.RegisterType((*Any)(nil), "openapi.v2.Any") + proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") + proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") + proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") + proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") + proto.RegisterType((*Default)(nil), "openapi.v2.Default") + proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") + proto.RegisterType((*Document)(nil), "openapi.v2.Document") + proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") + proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") + proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") + proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") + proto.RegisterType((*Header)(nil), "openapi.v2.Header") + proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") + proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") + proto.RegisterType((*Info)(nil), "openapi.v2.Info") + proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") + proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") + proto.RegisterType((*License)(nil), "openapi.v2.License") + proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") + proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") + proto.RegisterType((*NamedParameter)(nil), 
"openapi.v2.NamedParameter") + proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") + proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") + proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") + proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") + proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") + proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") + proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") + proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") + proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") + proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") + proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") + proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") + proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") + proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") + proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") + proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") + proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") + proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") + proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") + proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") + proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") + proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") + proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") + proto.RegisterType((*Response)(nil), "openapi.v2.Response") + proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") + proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") + proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") + proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") + proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") + proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") + proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") + proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") + proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") + proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") + proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") + proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") + proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") +} + +func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 3129 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, + 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, + 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, + 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, + 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, + 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d, + 
0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7, + 0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7, + 0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f, + 0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c, + 0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c, + 0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc, + 0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01, + 0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2, + 0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5, + 0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23, + 0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51, + 0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2, + 0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5, + 0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c, + 0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89, + 0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f, + 0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d, + 0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c, + 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e, + 0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1, + 0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55, + 0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87, + 0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4, + 0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30, + 0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6, + 0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17, + 0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4, + 0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74, + 0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2, + 0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4, + 0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0, + 0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa, + 0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61, + 0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e, + 0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b, + 0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0, + 0x03, 0x5b, 0x79, 0x25, 
0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7, + 0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5, + 0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e, + 0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04, + 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d, + 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d, + 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13, + 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18, + 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9, + 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73, + 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a, + 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9, + 0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8, + 0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19, + 0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9, + 0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60, + 0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29, + 0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92, + 0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc, + 0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a, + 0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe, + 0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6, + 0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8, + 0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90, + 0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf, + 0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64, + 0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0, + 0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d, + 0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18, + 0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4, + 0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58, + 0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21, + 0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d, + 0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9, + 0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67, + 0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17, + 0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 
0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27, + 0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5, + 0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a, + 0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4, + 0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e, + 0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2, + 0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87, + 0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38, + 0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93, + 0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0, + 0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b, + 0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2, + 0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86, + 0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c, + 0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02, + 0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3, + 0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3, + 0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e, + 0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7, + 0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94, + 0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34, + 0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a, + 0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8, + 0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b, + 0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5, + 0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e, + 0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89, + 0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6, + 0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c, + 0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14, + 0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41, + 0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7, + 0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d, + 0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc, + 0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7, + 0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3, + 0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 
0x9a, 0x35, 0xd1, 0x69, + 0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36, + 0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e, + 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c, + 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab, + 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33, + 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07, + 0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f, + 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f, + 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b, + 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc, + 0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e, + 0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad, + 0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16, + 0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89, + 0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45, + 0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8, + 0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b, + 0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08, + 0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93, + 0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7, + 0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb, + 0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74, + 0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41, + 0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20, + 0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a, + 0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94, + 0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e, + 0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4, + 0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6, + 0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36, + 0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a, + 0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58, + 0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46, + 0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88, + 0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c, + 0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81, + 
0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61, + 0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4, + 0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea, + 0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf, + 0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b, + 0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0, + 0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4, + 0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5, + 0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44, + 0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f, + 0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36, + 0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd, + 0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e, + 0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb, + 0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d, + 0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77, + 0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9, + 0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5, + 0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4, + 0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6, + 0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f, + 0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb, + 0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7, + 0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66, + 0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9, + 0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d, + 0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2, + 0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c, + 0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2, + 0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62, + 0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7, + 0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1, + 0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21, + 0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e, + 0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6, + 0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe, + 0xb1, 0x5b, 0xc6, 0xb3, 
0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c, + 0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae, + 0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e, + 0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1, + 0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8, + 0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47, + 0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf, + 0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, + 0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto new file mode 100644 index 000000000000..557c88072cd8 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto @@ -0,0 +1,663 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v2; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v2"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +message AdditionalPropertiesItem { + oneof oneof { + Schema schema = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message ApiKeySecurity { + string type = 1; + string name = 2; + string in = 3; + string description = 4; + repeated NamedAny vendor_extension = 5; +} + +message BasicAuthenticationSecurity { + string type = 1; + string description = 2; + repeated NamedAny vendor_extension = 3; +} + +message BodyParameter { + // A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 1; + // The name of the parameter. + string name = 2; + // Determines the location of the parameter. + string in = 3; + // Determines whether or not this parameter is required or optional. + bool required = 4; + Schema schema = 5; + repeated NamedAny vendor_extension = 6; +} + +// Contact information for the owners of the API. +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. + string url = 2; + // The email address of the contact person/organization. + string email = 3; + repeated NamedAny vendor_extension = 4; +} + +message Default { + repeated NamedAny additional_properties = 1; +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +message Definitions { + repeated NamedSchema additional_properties = 1; +} + +message Document { + // The Swagger version of this document. + string swagger = 1; + Info info = 2; + // The host (name or ip) of the API. Example: 'swagger.io' + string host = 3; + // The base path to the API. Example: '/api'. + string base_path = 4; + // The transfer protocol of the API. + repeated string schemes = 5; + // A list of MIME types accepted by the API. + repeated string consumes = 6; + // A list of MIME types the API can produce. + repeated string produces = 7; + Paths paths = 8; + Definitions definitions = 9; + ParameterDefinitions parameters = 10; + ResponseDefinitions responses = 11; + repeated SecurityRequirement security = 12; + SecurityDefinitions security_definitions = 13; + repeated Tag tags = 14; + ExternalDocs external_docs = 15; + repeated NamedAny vendor_extension = 16; +} + +message Examples { + repeated NamedAny additional_properties = 1; +} + +// information about external documentation +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// A deterministic version of a JSON Schema object. +message FileSchema { + string format = 1; + string title = 2; + string description = 3; + Any default = 4; + repeated string required = 5; + string type = 6; + bool read_only = 7; + ExternalDocs external_docs = 8; + Any example = 9; + repeated NamedAny vendor_extension = 10; +} + +message FormDataParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
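+ // The corresponding allowEmptyValue property in openapi-2.0.json defaults to false.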
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Header { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + string description = 18; + repeated NamedAny vendor_extension = 19; +} + +message HeaderParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +message Headers { + repeated NamedHeader additional_properties = 1; +} + +// General information about the API. +message Info { + // A unique and precise title of the API. + string title = 1; + // A semantic version number of the API. + string version = 2; + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + string description = 3; + // The terms of service for the API. + string terms_of_service = 4; + Contact contact = 5; + License license = 6; + repeated NamedAny vendor_extension = 7; +} + +message ItemsItem { + repeated Schema schema = 1; +} + +message JsonReference { + string _ref = 1; + string description = 2; +} + +message License { + // The name of the license type. It's encouraged to use an OSI compatible license. + string name = 1; + // The URL pointing to the license. + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +message NamedHeader { + // Map key + string name = 1; + // Mapped value + Header value = 2; +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +message NamedParameter { + // Map key + string name = 1; + // Mapped value + Parameter value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
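+// Repeated (name,value) pairs are used here instead of a proto3 map so the ordering of the source document is preserved.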
+message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +message NamedResponse { + // Map key + string name = 1; + // Mapped value + Response value = 2; +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +message NamedResponseValue { + // Map key + string name = 1; + // Mapped value + ResponseValue value = 2; +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +message NamedSchema { + // Map key + string name = 1; + // Mapped value + Schema value = 2; +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. +message NamedSecurityDefinitionsItem { + // Map key + string name = 1; + // Mapped value + SecurityDefinitionsItem value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +message NonBodyParameter { + oneof oneof { + HeaderParameterSubSchema header_parameter_sub_schema = 1; + FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + QueryParameterSubSchema query_parameter_sub_schema = 3; + PathParameterSubSchema path_parameter_sub_schema = 4; + } +} + +message Oauth2AccessCodeSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string token_url = 5; + string description = 6; + repeated NamedAny vendor_extension = 7; +} + +message Oauth2ApplicationSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2ImplicitSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2PasswordSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2Scopes { + repeated NamedString additional_properties = 1; +} + +message Operation { + repeated string tags = 1; + // A brief summary of the operation. + string summary = 2; + // A longer description of the operation, GitHub Flavored Markdown is allowed. + string description = 3; + ExternalDocs external_docs = 4; + // A unique identifier of the operation. + string operation_id = 5; + // A list of MIME types the API can produce. + repeated string produces = 6; + // A list of MIME types the API can consume. + repeated string consumes = 7; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 8; + Responses responses = 9; + // The transfer protocol of the API. 
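+ // When set on an operation this overrides any document-level 'schemes' value.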
+ repeated string schemes = 10; + bool deprecated = 11; + repeated SecurityRequirement security = 12; + repeated NamedAny vendor_extension = 13; +} + +message Parameter { + oneof oneof { + BodyParameter body_parameter = 1; + NonBodyParameter non_body_parameter = 2; + } +} + +// One or more JSON representations for parameters +message ParameterDefinitions { + repeated NamedParameter additional_properties = 1; +} + +message ParametersItem { + oneof oneof { + Parameter parameter = 1; + JsonReference json_reference = 2; + } +} + +message PathItem { + string _ref = 1; + Operation get = 2; + Operation put = 3; + Operation post = 4; + Operation delete = 5; + Operation options = 6; + Operation head = 7; + Operation patch = 8; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 9; + repeated NamedAny vendor_extension = 10; +} + +message PathParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +message Paths { + repeated NamedAny vendor_extension = 1; + repeated NamedPathItem path = 2; +} + +message PrimitivesItems { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + repeated NamedAny vendor_extension = 18; +} + +message Properties { + repeated NamedSchema additional_properties = 1; +} + +message QueryParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Response { + string description = 1; + SchemaItem schema = 2; + Headers headers = 3; + Examples examples = 4; + repeated NamedAny vendor_extension = 5; +} + +// One or more JSON representations for parameters +message ResponseDefinitions { + repeated NamedResponse additional_properties = 1; +} + +message ResponseValue { + oneof oneof { + Response response = 1; + JsonReference json_reference = 2; + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. +message Responses { + repeated NamedResponseValue response_code = 1; + repeated NamedAny vendor_extension = 2; +} + +// A deterministic version of a JSON Schema object. +message Schema { + string _ref = 1; + string format = 2; + string title = 3; + string description = 4; + Any default = 5; + double multiple_of = 6; + double maximum = 7; + bool exclusive_maximum = 8; + double minimum = 9; + bool exclusive_minimum = 10; + int64 max_length = 11; + int64 min_length = 12; + string pattern = 13; + int64 max_items = 14; + int64 min_items = 15; + bool unique_items = 16; + int64 max_properties = 17; + int64 min_properties = 18; + repeated string required = 19; + repeated Any enum = 20; + AdditionalPropertiesItem additional_properties = 21; + TypeItem type = 22; + ItemsItem items = 23; + repeated Schema all_of = 24; + Properties properties = 25; + string discriminator = 26; + bool read_only = 27; + Xml xml = 28; + ExternalDocs external_docs = 29; + Any example = 30; + repeated NamedAny vendor_extension = 31; +} + +message SchemaItem { + oneof oneof { + Schema schema = 1; + FileSchema file_schema = 2; + } +} + +message SecurityDefinitions { + repeated NamedSecurityDefinitionsItem additional_properties = 1; +} + +message SecurityDefinitionsItem { + oneof oneof { + BasicAuthenticationSecurity basic_authentication_security = 1; + ApiKeySecurity api_key_security = 2; + Oauth2ImplicitSecurity oauth2_implicit_security = 3; + Oauth2PasswordSecurity oauth2_password_security = 4; + Oauth2ApplicationSecurity oauth2_application_security = 5; + Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + } +} + +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +message StringArray { + repeated string value = 1; +} + +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny vendor_extension = 4; +} + +message TypeItem { + repeated string value = 1; +} + +// Any property starting with x- is valid. 
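+// Extension values are carried as ordered (name,value) pairs of arbitrary YAML (see NamedAny above).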
+message VendorExtension { + repeated NamedAny additional_properties = 1; +} + +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny vendor_extension = 6; +} + diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md new file mode 100644 index 000000000000..836fb32a7ef0 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md @@ -0,0 +1,16 @@ +# OpenAPI v2 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model +and related code for supporting OpenAPI v2. + +Gnostic applications and plugins can use OpenAPIv2.proto +to generate Protocol Buffer support code for their preferred languages. + +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI +descriptions into the Protocol Buffer-based datastructures +generated from OpenAPIv2.proto. + +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic +compiler generator, and OpenAPIv2.pb.go is generated by +protoc, the Protocol Buffer compiler, and protoc-gen-go, the +Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json new file mode 100644 index 000000000000..2815a26ea718 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json @@ -0,0 +1,1610 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." 
+ }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." 
+ }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for parameters" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + 
"type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + }, + "description": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/BUILD b/vendor/github.com/googleapis/gnostic/compiler/BUILD new file mode 100644 index 000000000000..3cde6d1b978f --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/BUILD @@ -0,0 +1,35 @@ 
+load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "context.go", + "error.go", + "extension-handler.go", + "helpers.go", + "main.go", + "reader.go", + ], + importpath = "github.com/googleapis/gnostic/compiler", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes/any:go_default_library", + "//vendor/github.com/googleapis/gnostic/extensions:go_default_library", + "//vendor/gopkg.in/yaml.v2:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md new file mode 100644 index 000000000000..848b16c69b80 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -0,0 +1,3 @@ +# Compiler support code + +This directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go new file mode 100644 index 000000000000..a64c1b75db40 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -0,0 +1,43 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Context contains state of the compiler as it traverses a document. +type Context struct { + Parent *Context + Name string + ExtensionHandlers *[]ExtensionHandler +} + +// NewContextWithExtensions returns a new object representing the compiler state +func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} +} + +// NewContext returns a new object representing the compiler state +func NewContext(name string, parent *Context) *Context { + if parent != nil { + return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + } + return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} +} + +// Description returns a text description of the compiler state +func (context *Context) Description() string { + if context.Parent != nil { + return context.Parent.Description() + "." 
+ context.Name + } + return context.Name +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go new file mode 100644 index 000000000000..d8672c1008b7 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -0,0 +1,61 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Error represents compiler errors and their location in the document. +type Error struct { + Context *Context + Message string +} + +// NewError creates an Error. +func NewError(context *Context, message string) *Error { + return &Error{Context: context, Message: message} +} + +// Error returns the string value of an Error. +func (err *Error) Error() string { + if err.Context == nil { + return "ERROR " + err.Message + } + return "ERROR " + err.Context.Description() + " " + err.Message +} + +// ErrorGroup is a container for groups of Error values. +type ErrorGroup struct { + Errors []error +} + +// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. +func NewErrorGroupOrNil(errors []error) error { + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } else { + return &ErrorGroup{Errors: errors} + } +} + +func (group *ErrorGroup) Error() string { + result := "" + for i, err := range group.Errors { + if i > 0 { + result += "\n" + } + result += err.Error() + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go new file mode 100644 index 000000000000..1f85b650e817 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go @@ -0,0 +1,101 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + + "strings" + + "errors" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + ext_plugin "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v2" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// HandleExtension calls a binary extension handler. 
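+// It tries each handler registered on the context in order and stops at the
+// first one that returns a non-nil result.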
+func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { + handled := false + var errFromPlugin error + var outFromPlugin *any.Any + + if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { + for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { + outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) + if outFromPlugin == nil { + continue + } else { + handled = true + break + } + } + } + return handled, outFromPlugin, errFromPlugin +} + +func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + binary, _ := yaml.Marshal(in) + + request := &ext_plugin.ExtensionHandlerRequest{} + + version := &ext_plugin.Version{} + version.Major = 0 + version.Minor = 1 + version.Patch = 0 + request.CompilerVersion = version + + request.Wrapper = &ext_plugin.Wrapper{} + + request.Wrapper.Version = "v2" + request.Wrapper.Yaml = string(binary) + request.Wrapper.ExtensionName = extensionName + + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + + if err != nil { + fmt.Printf("Error: %+v\n", err) + return nil, err + } + response := &ext_plugin.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil { + fmt.Printf("Error: %+v\n", err) + fmt.Printf("%s\n", string(output)) + return nil, err + } + if !response.Handled { + return nil, nil + } + if len(response.Error) != 0 { + message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) + return nil, errors.New(message) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go new file mode 100644 index 000000000000..76df635ff9bf --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -0,0 +1,197 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "gopkg.in/yaml.v2" + "regexp" + "sort" + "strconv" +) + +// compiler helper functions, usually called from generated code + +// UnpackMap gets a yaml.MapSlice if possible. +func UnpackMap(in interface{}) (yaml.MapSlice, bool) { + m, ok := in.(yaml.MapSlice) + if ok { + return m, true + } + // do we have an empty array? + a, ok := in.([]interface{}) + if ok && len(a) == 0 { + // if so, return an empty map + return yaml.MapSlice{}, true + } + return nil, false +} + +// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. 
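+// Note: the type assertion on item.Key will panic if a map key is not a string.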
+func SortedKeysForMap(m yaml.MapSlice) []string { + keys := make([]string, 0) + for _, item := range m { + keys = append(keys, item.Key.(string)) + } + sort.Strings(keys) + return keys +} + +// MapHasKey returns true if a yaml.MapSlice contains a specified key. +func MapHasKey(m yaml.MapSlice, key string) bool { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return true + } + } + return false +} + +// MapValueForKey gets the value of a map value for a specified key. +func MapValueForKey(m yaml.MapSlice, key string) interface{} { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return item.Value + } + } + return nil +} + +// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. +func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { + stringArray := make([]string, 0) + for _, item := range interfaceArray { + v, ok := item.(string) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// MissingKeysInMap identifies which keys from a list of required keys are not in a map. +func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { + missingKeys := make([]string, 0) + for _, k := range requiredKeys { + if !MapHasKey(m, k) { + missingKeys = append(missingKeys, k) + } + } + return missingKeys +} + +// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. +func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { + invalidKeys := make([]string, 0) + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok { + key := itemKey + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { + found = true + break + } + } + if !found { + invalidKeys = append(invalidKeys, key) + } + } + } + } + return invalidKeys +} + +// DescribeMap describes a map (for debugging purposes). +func DescribeMap(in interface{}, indent string) string { + description := "" + m, ok := in.(map[string]interface{}) + if ok { + keys := make([]string, 0) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := m[k] + description += fmt.Sprintf("%s%s:\n", indent, k) + description += DescribeMap(v, indent+" ") + } + return description + } + a, ok := in.([]interface{}) + if ok { + for i, v := range a { + description += fmt.Sprintf("%s%d:\n", indent, i) + description += DescribeMap(v, indent+" ") + } + return description + } + description += fmt.Sprintf("%s%+v\n", indent, in) + return description +} + +// PluralProperties returns the string "properties" pluralized. +func PluralProperties(count int) string { + if count == 1 { + return "property" + } + return "properties" +} + +// StringArrayContainsValue returns true if a string array contains a specified value. +func StringArrayContainsValue(array []string, value string) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +// StringArrayContainsValues returns true if a string array contains all of a list of specified values. 
+func StringArrayContainsValues(array []string, values []string) bool { + for _, value := range values { + if !StringArrayContainsValue(array, value) { + return false + } + } + return true +} + +// StringValue returns the string value of an item. +func StringValue(item interface{}) (value string, ok bool) { + value, ok = item.(string) + if ok { + return value, ok + } + intValue, ok := item.(int) + if ok { + return strconv.Itoa(intValue), true + } + return "", false +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go new file mode 100644 index 000000000000..9713a21cca26 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/main.go @@ -0,0 +1,16 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compiler provides support functions to generated compiler code. +package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go new file mode 100644 index 000000000000..2d4b3303db44 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -0,0 +1,173 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "errors" + "fmt" + "gopkg.in/yaml.v2" + "io/ioutil" + "log" + "net/http" + "net/url" + "path/filepath" + "strings" +) + +var fileCache map[string][]byte +var infoCache map[string]interface{} +var count int64 + +var verboseReader = false + +func initializeFileCache() { + if fileCache == nil { + fileCache = make(map[string][]byte, 0) + } +} + +func initializeInfoCache() { + if infoCache == nil { + infoCache = make(map[string]interface{}, 0) + } +} + +// FetchFile gets a specified file from the local filesystem or a remote location. 
+func FetchFile(fileurl string) ([]byte, error) { + initializeFileCache() + bytes, ok := fileCache[fileurl] + if ok { + if verboseReader { + log.Printf("Cache hit %s", fileurl) + } + return bytes, nil + } + if verboseReader { + log.Printf("Fetching %s", fileurl) + } + response, err := http.Get(fileurl) + if err != nil { + return nil, err + } + if response.StatusCode != 200 { + return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) + } + defer response.Body.Close() + bytes, err = ioutil.ReadAll(response.Body) + if err == nil { + fileCache[fileurl] = bytes + } + return bytes, err +} + +// ReadBytesForFile reads the bytes of a file. +func ReadBytesForFile(filename string) ([]byte, error) { + // is the filename a url? + fileurl, _ := url.Parse(filename) + if fileurl.Scheme != "" { + // yes, fetch it + bytes, err := FetchFile(filename) + if err != nil { + return nil, err + } + return bytes, nil + } + // no, it's a local filename + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. +func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { + initializeInfoCache() + cachedInfo, ok := infoCache[filename] + if ok { + if verboseReader { + log.Printf("Cache hit info for file %s", filename) + } + return cachedInfo, nil + } + if verboseReader { + log.Printf("Reading info for file %s", filename) + } + var info yaml.MapSlice + err := yaml.Unmarshal(bytes, &info) + if err != nil { + return nil, err + } + infoCache[filename] = info + return info, nil +} + +// ReadInfoForRef reads a file and returns the fragment needed to resolve a $ref. +func ReadInfoForRef(basefile string, ref string) (interface{}, error) { + initializeInfoCache() + { + info, ok := infoCache[ref] + if ok { + if verboseReader { + log.Printf("Cache hit for ref %s#%s", basefile, ref) + } + return info, nil + } + } + if verboseReader { + log.Printf("Reading info for ref %s#%s", basefile, ref) + } + count = count + 1 + basedir, _ := filepath.Split(basefile) + parts := strings.Split(ref, "#") + var filename string + if parts[0] != "" { + filename = basedir + parts[0] + } else { + filename = basefile + } + bytes, err := ReadBytesForFile(filename) + if err != nil { + return nil, err + } + info, err := ReadInfoFromBytes(filename, bytes) + if err != nil { + log.Printf("File error: %v\n", err) + } else { + if len(parts) > 1 { + path := strings.Split(parts[1], "/") + for i, key := range path { + if i > 0 { + m, ok := info.(yaml.MapSlice) + if ok { + found := false + for _, section := range m { + if section.Key == key { + info = section.Value + found = true + } + } + if !found { + infoCache[ref] = nil + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + } + } + } + } + } + infoCache[ref] = info + return info, nil +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/BUILD b/vendor/github.com/googleapis/gnostic/extensions/BUILD new file mode 100644 index 000000000000..435cc2b42ec0 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "extension.pb.go", + "extensions.go", + ], + importpath = "github.com/googleapis/gnostic/extensions", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", +
"//vendor/github.com/golang/protobuf/ptypes:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes/any:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh new file mode 100755 index 000000000000..68d02a02ac5d --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh @@ -0,0 +1,5 @@ +go get github.com/golang/protobuf/protoc-gen-go + +protoc \ +--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto + diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md new file mode 100644 index 000000000000..ff1c2eb1e3ad --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/README.md @@ -0,0 +1,5 @@ +# Extensions + +This directory contains support code for building Gnostic extensions and associated examples. + +Extensions are used to compile vendor or specification extensions into protocol buffer structures. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go new file mode 100644 index 000000000000..7c6b91496732 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -0,0 +1,219 @@ +// Code generated by protoc-gen-go. +// source: extension.proto +// DO NOT EDIT! + +/* +Package openapiextension_v1 is a generated protocol buffer package. + +It is generated from these files: + extension.proto + +It has these top-level messages: + Version + ExtensionHandlerRequest + ExtensionHandlerResponse + Wrapper +*/ +package openapiextension_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of OpenAPI compiler. +type Version struct { + Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. 
+ Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Version) GetMajor() int32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil { + return m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil { + return m.Suffix + } + return "" +} + +// An encoded Request is written to the ExtensionHandler's stdin. +type ExtensionHandlerRequest struct { + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to openapic. + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` + // The version number of openapi compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } +func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerRequest) ProtoMessage() {} +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if m != nil { + return m.Wrapper + } + return nil +} + +func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +type ExtensionHandlerResponse struct { + // true if the extension is handled by the extension handler; false otherwise + Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` + // text output + Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` +} + +func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } +func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerResponse) ProtoMessage() {} +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ExtensionHandlerResponse) GetHandled() bool { + if m != nil { + return m.Handled + } + return false +} + +func (m *ExtensionHandlerResponse) GetError() []string { + if m != nil { + return m.Error + } + return nil +} + +func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +type Wrapper struct { + // version of the OpenAPI specification in which this extension was written. 
+ Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // Name of the extension + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` + // Must be a valid yaml for the proto + Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Wrapper) Reset() { *m = Wrapper{} } +func (m *Wrapper) String() string { return proto.CompactTextString(m) } +func (*Wrapper) ProtoMessage() {} +func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Wrapper) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Wrapper) GetExtensionName() string { + if m != nil { + return m.ExtensionName + } + return "" +} + +func (m *Wrapper) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") + proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") + proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") + proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") +} + +func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 355 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, + 0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, + 0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, + 0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, + 0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4, + 0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4, + 0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0, + 0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34, + 0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a, + 0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a, + 0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66, + 0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2, + 0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d, + 0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3, + 0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c, + 0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa, + 0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab, + 0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3, + 0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62, + 0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23, + 0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1, + 0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52, + 
0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto new file mode 100644 index 000000000000..806760a13292 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -0,0 +1,93 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +import "google/protobuf/any.proto"; +package openapiextension.v1; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one level of name nesting and being +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold the proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIExtensionV1"; + +// The Java package name must be the proto package name with a proper prefix. +option java_package = "org.openapic.v1"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +// +option objc_class_prefix = "OAE"; // "OpenAPI Extension" + +// The version number of OpenAPI compiler. +message Version { + int32 major = 1; + int32 minor = 2; + int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + string suffix = 4; +} + +// An encoded Request is written to the ExtensionHandler's stdin. +message ExtensionHandlerRequest { + + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to openapic. + Wrapper wrapper = 1; + + // The version number of openapi compiler. + Version compiler_version = 3; +} + +// The extension writes an encoded ExtensionHandlerResponse to stdout. +message ExtensionHandlerResponse { + + // true if the extension is handled by the extension handler; false otherwise + bool handled = 1; + + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code.
+ repeated string error = 2; + + // text output + google.protobuf.Any value = 3; +} + +message Wrapper { + // version of the OpenAPI specification in which this extension was written. + string version = 1; + + // Name of the extension + string extension_name = 2; + + // Must be a valid yaml for the proto + string yaml = 3; +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go new file mode 100644 index 000000000000..94a8e62a776e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go @@ -0,0 +1,82 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapiextension_v1 + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" +) + +type documentHandler func(version string, extensionName string, document string) +type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) + +func forInputYamlFromOpenapic(handler documentHandler) { + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + fmt.Println("File error:", err.Error()) + os.Exit(1) + } + if len(data) == 0 { + fmt.Println("No input data.") + os.Exit(1) + } + request := &ExtensionHandlerRequest{} + err = proto.Unmarshal(data, request) + if err != nil { + fmt.Println("Input error:", err.Error()) + os.Exit(1) + } + handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) +} + +// ProcessExtension calls the handler for a specified extension.
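+// It reads a proto-encoded ExtensionHandlerRequest from stdin, invokes the
+// handler on the wrapped extension YAML, and writes a proto-encoded
+// ExtensionHandlerResponse to stdout, exiting early if the extension was
+// not handled or an error occurred.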
+func ProcessExtension(handleExtension extensionHandler) { + response := &ExtensionHandlerResponse{} + forInputYamlFromOpenapic( + func(version string, extensionName string, yamlInput string) { + var newObject proto.Message + var err error + + handled, newObject, err := handleExtension(extensionName, yamlInput) + if !handled { + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + + // If we reach here, then the extension is handled + response.Handled = true + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + response.Value, err = ptypes.MarshalAny(newObject) + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + }) + + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) +} diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml new file mode 100644 index 000000000000..faca4dad3dca --- /dev/null +++ b/vendor/github.com/gorilla/context/.travis.yml @@ -0,0 +1,19 @@ +language: go +sudo: false + +matrix: + include: + - go: 1.3 + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: tip + +install: + - go get golang.org/x/tools/cmd/vet + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/gorilla/context/BUILD b/vendor/github.com/gorilla/context/BUILD new file mode 100644 index 000000000000..6c59219266ce --- /dev/null +++ b/vendor/github.com/gorilla/context/BUILD @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "context.go", + "doc.go", + ], + importpath = "github.com/gorilla/context", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE new file mode 100644 index 000000000000..0e5fb872800d --- /dev/null +++ b/vendor/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md new file mode 100644 index 000000000000..c60a31b053bc --- /dev/null +++ b/vendor/github.com/gorilla/context/README.md @@ -0,0 +1,7 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go new file mode 100644 index 000000000000..81cb128b19ca --- /dev/null +++ b/vendor/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. +func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. +func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. +func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. 
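+// It is a no-op if the request was never registered via Set.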
+func Delete(r *http.Request, key interface{}) { + mutex.Lock() + if data[r] != nil { + delete(data[r], key) + } + mutex.Unlock() +} + +// Clear removes all values stored for a given request. +// +// This is usually called by a handler wrapper to clean up request +// variables at the end of a request lifetime. See ClearHandler(). +func Clear(r *http.Request) { + mutex.Lock() + clear(r) + mutex.Unlock() +} + +// clear is Clear without the lock. +func clear(r *http.Request) { + delete(data, r) + delete(datat, r) +} + +// Purge removes request data stored for longer than maxAge, in seconds. +// It returns the number of requests removed. +// +// If maxAge <= 0, all request data is removed. +// +// This is only used as a sanity check: if context cleaning was not properly +// set up, some request data can be kept forever, consuming an increasing +// amount of memory. In case this is detected, Purge() must be called +// periodically until the problem is fixed. +func Purge(maxAge int) int { + mutex.Lock() + count := 0 + if maxAge <= 0 { + count = len(data) + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) + } else { + min := time.Now().Unix() - int64(maxAge) + for r := range data { + if datat[r] < min { + clear(r) + count++ + } + } + } + mutex.Unlock() + return count +} + +// ClearHandler wraps an http.Handler and clears request values at the end +// of a request lifetime. +func ClearHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer Clear(r) + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go new file mode 100644 index 000000000000..73c7400311e1 --- /dev/null +++ b/vendor/github.com/gorilla/context/doc.go @@ -0,0 +1,82 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package context stores values shared during a request lifetime. + +For example, a router can set variables extracted from the URL and later +application handlers can access those values, or it can be used to store +session values to be saved at the end of a request. There are several +other common uses. + +The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: + + http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 + +Here's the basic usage: first define the keys that you will need. The key +type is interface{} so a key can be of any type that supports equality. +Here we define a key using a custom int type to avoid name collisions: + + package foo + + import ( + "github.com/gorilla/context" + ) + + type key int + + const MyKey key = 0 + +Then set a variable. Variables are bound to an http.Request object, so you +need a request instance to set a value: + + context.Set(r, MyKey, "bar") + +The application can later access the variable using the same key you provided: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // val is "bar". + val := context.Get(r, foo.MyKey) + + // returns ("bar", true) + val, ok := context.GetOk(r, foo.MyKey) + // ... + } + +And that's all about the basic usage. We discuss some other ideas below. + +Any type can be stored in the context.
To enforce a given type, make the key +private and wrap Get() and Set() to accept and return values of a specific +type: + + type key int + + const mykey key = 0 + + // GetMyKey returns a value for this package from the request values. + func GetMyKey(r *http.Request) SomeType { + if rv := context.Get(r, mykey); rv != nil { + return rv.(SomeType) + } + return nil + } + + // SetMyKey sets a value for this package in the request values. + func SetMyKey(r *http.Request, val SomeType) { + context.Set(r, mykey, val) + } + +Variables must be cleared at the end of a request, to remove all values +that were stored. This can be done in an http.Handler, after a request was +served. Just call Clear() passing the request: + + context.Clear(r) + +...or use ClearHandler(), which conveniently wraps an http.Handler to clear +variables at the end of a request lifetime. + +The Routers from the packages gorilla/mux and gorilla/pat call Clear() +so if you are using either of them you don't need to clear the context manually. +*/ +package context diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml new file mode 100644 index 000000000000..3302233f3c37 --- /dev/null +++ b/vendor/github.com/gorilla/mux/.travis.yml @@ -0,0 +1,22 @@ +language: go +sudo: false + +matrix: + include: + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: 1.8 + - go: 1.9 + - go: tip + allow_failures: + - go: tip + +install: + - # Skip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/gorilla/mux/BUILD b/vendor/github.com/gorilla/mux/BUILD new file mode 100644 index 000000000000..84a8ba24d44c --- /dev/null +++ b/vendor/github.com/gorilla/mux/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "context_gorilla.go", + "context_native.go", + "doc.go", + "middleware.go", + "mux.go", + "regexp.go", + "route.go", + "test_helpers.go", + ], + importpath = "github.com/gorilla/mux", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/gorilla/context:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md new file mode 100644 index 000000000000..232be82e47a6 --- /dev/null +++ b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md @@ -0,0 +1,11 @@ +**What version of Go are you running?** (Paste the output of `go version`) + + +**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`) + + +**Describe your problem** (and what you have tried so far) + + +**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it) + diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE new file mode 100644 index 000000000000..0e5fb872800d --- /dev/null +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md new file mode 100644 index 000000000000..f9b3103f0684 --- /dev/null +++ b/vendor/github.com/gorilla/mux/README.md @@ -0,0 +1,560 @@ +gorilla/mux +=== +[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) +[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) + +![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) + +http://www.gorillatoolkit.org/pkg/mux + +Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to +their respective handler. + +The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: + +* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. +* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. +* URL hosts, paths and query values can have variables with an optional regular expression. +* Registered URLs can be built, or "reversed", which helps maintaining references to resources. +* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. 
+ +--- + +* [Install](#install) +* [Examples](#examples) +* [Matching Routes](#matching-routes) +* [Static Files](#static-files) +* [Registered URLs](#registered-urls) +* [Walking Routes](#walking-routes) +* [Graceful Shutdown](#graceful-shutdown) +* [Middleware](#middleware) +* [Full Example](#full-example) + +--- + +## Install + +With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: + +```sh +go get -u github.com/gorilla/mux +``` + +## Examples + +Let's start registering a couple of URL paths and handlers: + +```go +func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) +} +``` + +Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. + +Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/products/{key}", ProductHandler) +r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: + +```go +func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Category: %v\n", vars["category"]) +} +``` + +And this is all you need to know about the basic usage. More advanced options are explained below. + +### Matching Routes + +Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: + +```go +r := mux.NewRouter() +// Only matches if domain is "www.example.com". +r.Host("www.example.com") +// Matches a dynamic subdomain. +r.Host("{subdomain:[a-z]+}.domain.com") +``` + +There are several other matchers that can be added. To match path prefixes: + +```go +r.PathPrefix("/products/") +``` + +...or HTTP methods: + +```go +r.Methods("GET", "POST") +``` + +...or URL schemes: + +```go +r.Schemes("https") +``` + +...or header values: + +```go +r.Headers("X-Requested-With", "XMLHttpRequest") +``` + +...or query values: + +```go +r.Queries("key", "value") +``` + +...or to use a custom matcher function: + +```go +r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 +}) +``` + +...and finally, it is possible to combine several matchers in a single route: + +```go +r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). + Schemes("http") +``` + +Routes are tested in the order they were added to the router. If two routes match, the first one wins: + +```go +r := mux.NewRouter() +r.HandleFunc("/specific", specificHandler) +r.PathPrefix("/").Handler(catchAllHandler) +``` + +Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". + +For example, let's say we have several URLs that should only match when the host is `www.example.com`. 
Create a route for that host and get a "subrouter" from it: + +```go +r := mux.NewRouter() +s := r.Host("www.example.com").Subrouter() +``` + +Then register routes in the subrouter: + +```go +s.HandleFunc("/products/", ProductsHandler) +s.HandleFunc("/products/{key}", ProductHandler) +s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter. + +There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as a base for their paths: + +```go +r := mux.NewRouter() +s := r.PathPrefix("/products").Subrouter() +// "/products/" +s.HandleFunc("/", ProductsHandler) +// "/products/{key}/" +s.HandleFunc("/{key}/", ProductHandler) +// "/products/{key}/details" +s.HandleFunc("/{key}/details", ProductDetailsHandler) +``` + +### Listing Routes + +Routes on a mux can be listed using the Router.Walk method, which is useful for generating documentation: + +```go +package main + +import ( + "fmt" + "net/http" + "strings" + + "github.com/gorilla/mux" +) + +func handler(w http.ResponseWriter, r *http.Request) { + return +} + +func main() { + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.HandleFunc("/products", handler).Methods("POST") + r.HandleFunc("/articles", handler).Methods("GET") + r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") + r.HandleFunc("/authors", handler).Queries("surname", "{surname}") + r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + t, err := route.GetPathTemplate() + if err != nil { + return err + } + qt, err := route.GetQueriesTemplates() + if err != nil { + return err + } + // p will contain a regular expression that is compatible with regular expressions in Perl, Python, and other languages. + // For example, the regular expression for path '/articles/{id}' will be '^/articles/(?P<v0>[^/]+)$'. + p, err := route.GetPathRegexp() + if err != nil { + return err + } + // qr will contain a list of regular expressions with the same semantics as GetPathRegexp, + // just applied to the Queries pairs instead, e.g., 'Queries("surname", "{surname}")' will return + // {"^surname=(?P<v0>.*)$"}. Each combined query pair will have an entry in the list. + qr, err := route.GetQueriesRegexp() + if err != nil { + return err + } + m, err := route.GetMethods() + if err != nil { + return err + } + fmt.Println(strings.Join(m, ","), strings.Join(qt, ","), strings.Join(qr, ","), t, p) + return nil + }) + http.Handle("/", r) +} +``` + +### Static Files + +Note that the path provided to `PathPrefix()` represents a "wildcard": calling +`PathPrefix("/static/").Handler(...)` means that the handler will be passed any +request that matches "/static/*". This makes it easy to serve static files with mux: + +```go +func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from.
Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + +### Registered URLs + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") +``` + +To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: + +```go +url, err := r.Get("article").URL("category", "technology", "id", "42") +``` + +...and the result will be a `url.URL` with the following path: + +``` +"/articles/technology/42" +``` + +This also works for host and query value variables: + +```go +r := mux.NewRouter() +r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + Queries("filter", "{filter}"). + HandlerFunc(ArticleHandler). + Name("article") + +// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42", + "filter", "gorilla") +``` + +All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. For example, we could do: + +```go +r.HeadersRegexp("Content-Type", "application/(text|json)") +``` + +...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` + +There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: + +```go +// "http://news.domain.com/" +host, err := r.Get("article").URLHost("subdomain", "news") + +// "/articles/technology/42" +path, err := r.Get("article").URLPath("category", "technology", "id", "42") +``` + +And if you use subrouters, host and path defined separately can be built as well: + +```go +r := mux.NewRouter() +s := r.Host("{subdomain}.domain.com").Subrouter() +s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// "http://news.domain.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` + +### Walking Routes + +The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. 
For example, +the following prints all of the registered routes: + +```go +r := mux.NewRouter() +r.HandleFunc("/", handler) +r.HandleFunc("/products", handler).Methods("POST") +r.HandleFunc("/articles", handler).Methods("GET") +r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") +r.HandleFunc("/authors", handler).Queries("surname", "{surname}") +r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + t, err := route.GetPathTemplate() + if err != nil { + return err + } + qt, err := route.GetQueriesTemplates() + if err != nil { + return err + } + // p will contain a regular expression that is compatible with regular expressions in Perl, Python, and other languages. + // For example, the regular expression for path '/articles/{id}' will be '^/articles/(?P<v0>[^/]+)$'. + p, err := route.GetPathRegexp() + if err != nil { + return err + } + // qr will contain a list of regular expressions with the same semantics as GetPathRegexp, + // just applied to the Queries pairs instead, e.g., 'Queries("surname", "{surname}")' will return + // {"^surname=(?P<v0>.*)$"}. Each combined query pair will have an entry in the list. + qr, err := route.GetQueriesRegexp() + if err != nil { + return err + } + m, err := route.GetMethods() + if err != nil { + return err + } + fmt.Println(strings.Join(m, ","), strings.Join(qt, ","), strings.Join(qr, ","), t, p) + return nil +}) +``` + +### Graceful Shutdown + +Go 1.8 introduced the ability to [gracefully shut down](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`: + +```go +package main + +import ( + "context" + "flag" + "log" + "net/http" + "os" + "os/signal" + "time" + + "github.com/gorilla/mux" +) + +func main() { + var wait time.Duration + flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully waits for existing connections to finish - e.g. 15s or 1m") + flag.Parse() + + r := mux.NewRouter() + // Add your routes as needed + + srv := &http.Server{ + Addr: "0.0.0.0:8080", + // Good practice to set timeouts to avoid Slowloris attacks. + WriteTimeout: time.Second * 15, + ReadTimeout: time.Second * 15, + IdleTimeout: time.Second * 60, + Handler: r, // Pass our instance of gorilla/mux in. + } + + // Run our server in a goroutine so that it doesn't block. + go func() { + if err := srv.ListenAndServe(); err != nil { + log.Println(err) + } + }() + + c := make(chan os.Signal, 1) + // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) + // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught. + signal.Notify(c, os.Interrupt) + + // Block until we receive our signal. + <-c + + // Create a deadline to wait for. + ctx, cancel := context.WithTimeout(context.Background(), wait) + defer cancel() + // Doesn't block if no connections, but will otherwise wait + // until the timeout deadline. + srv.Shutdown(ctx) + // Optionally, you could run srv.Shutdown in a goroutine and block on + // <-ctx.Done() if your application should wait for other services + // to finalize based on context cancellation. + log.Println("shutting down") + os.Exit(0) +} +``` + +### Middleware + +Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters. +Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler.
Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking. + +Mux middlewares are defined using the de facto standard type: + +```go +type MiddlewareFunc func(http.Handler) http.Handler +``` + +Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as a parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers. + +A very basic middleware which logs the URI of the request being handled could be written as: + +```go +func simpleMw(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Do stuff here + log.Println(r.RequestURI) + // Call the next handler, which can be another middleware in the chain, or the final handler. + next.ServeHTTP(w, r) + }) +} +``` + +Middlewares can be added to a router using `Router.AddMiddlewareFunc()`: + +```go +r := mux.NewRouter() +r.HandleFunc("/", handler) +r.AddMiddlewareFunc(simpleMw) +``` + +A more complex authentication middleware, which maps session tokens to users, could be written as: + +```go +// Define our struct +type authenticationMiddleware struct { + tokenUsers map[string]string +} + +// Initialize it somewhere +func (amw *authenticationMiddleware) Populate() { + amw.tokenUsers["00000000"] = "user0" + amw.tokenUsers["aaaaaaaa"] = "userA" + amw.tokenUsers["05f717e5"] = "randomUser" + amw.tokenUsers["deadbeef"] = "user0" +} + +// Middleware function, which will be called for each request +func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get("X-Session-Token") + + if user, found := amw.tokenUsers[token]; found { + // We found the token in our map + log.Printf("Authenticated user %s\n", user) + // Pass down the request to the next middleware (or final handler) + next.ServeHTTP(w, r) + } else { + // Write an error and stop the handler chain + http.Error(w, "Forbidden", http.StatusForbidden) + } + }) +} +``` + +```go +r := mux.NewRouter() +r.HandleFunc("/", handler) + +amw := authenticationMiddleware{tokenUsers: make(map[string]string)} +amw.Populate() + +r.AddMiddlewareFunc(amw.Middleware) +``` + +Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares *should* write to `ResponseWriter` if they *are* going to terminate the request, and they *should not* write to `ResponseWriter` if they *are not* going to terminate it. + +## Full Example + +Here's a complete, runnable example of a small `mux` based server: + +```go +package main + +import ( + "log" + "net/http" + + "github.com/gorilla/mux" +) + +func YourHandler(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Gorilla!\n")) +} + +func main() { + r := mux.NewRouter() + // Routes consist of a path and a handler function. + r.HandleFunc("/", YourHandler) + + // Bind to a port and pass our router in + log.Fatal(http.ListenAndServe(":8000", r)) +} +``` + +## License + +BSD licensed. See the LICENSE file for details.
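The ordering guarantee described in the Middleware section above is easy to see with a pair of labeled middlewares. The sketch below is illustrative only (the `named` helper is not part of mux) and assumes the `MiddlewareFunc` type and `Router.AddMiddlewareFunc` registration documented in this README:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// named builds a middleware that logs before and after the downstream
// handler runs, so the add-order of the chain is visible in the output.
func named(label string) mux.MiddlewareFunc {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			log.Printf("%s: before handler", label)
			next.ServeHTTP(w, r)
			log.Printf("%s: after handler", label)
		})
	}
}

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	})
	// Added first, so "outer" wraps "inner".
	r.AddMiddlewareFunc(named("outer"))
	r.AddMiddlewareFunc(named("inner"))
	log.Fatal(http.ListenAndServe(":8000", r))
}
```

Hitting `/` should log `outer: before handler`, `inner: before handler`, then `inner: after handler`, `outer: after handler`, showing that middlewares wrap the handler in the order they were added.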
diff --git a/vendor/github.com/gorilla/mux/context_gorilla.go b/vendor/github.com/gorilla/mux/context_gorilla.go new file mode 100644 index 000000000000..d7adaa8fad4f --- /dev/null +++ b/vendor/github.com/gorilla/mux/context_gorilla.go @@ -0,0 +1,26 @@ +// +build !go1.7 + +package mux + +import ( + "net/http" + + "github.com/gorilla/context" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return context.Get(r, key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + context.Set(r, key, val) + return r +} + +func contextClear(r *http.Request) { + context.Clear(r) +} diff --git a/vendor/github.com/gorilla/mux/context_native.go b/vendor/github.com/gorilla/mux/context_native.go new file mode 100644 index 000000000000..209cbea7d661 --- /dev/null +++ b/vendor/github.com/gorilla/mux/context_native.go @@ -0,0 +1,24 @@ +// +build go1.7 + +package mux + +import ( + "context" + "net/http" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return r.Context().Value(key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + return r.WithContext(context.WithValue(r.Context(), key, val)) +} + +func contextClear(r *http.Request) { + return +} diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go new file mode 100644 index 000000000000..013f088985b0 --- /dev/null +++ b/vendor/github.com/gorilla/mux/doc.go @@ -0,0 +1,307 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts, paths and query values can have variables with an optional + regular expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. 
For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/products/{key}", ProductHandler)
+	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+	r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+	vars := mux.Vars(request)
+	category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+	r := mux.NewRouter()
+	// Only matches if domain is "www.example.com".
+	r.Host("www.example.com")
+	// Matches a dynamic subdomain.
+	r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+	r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+	r.Methods("GET", "POST")
+
+...or URL schemes:
+
+	r.Schemes("https")
+
+...or header values:
+
+	r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+	r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+		return r.ProtoMajor == 0
+	})
+
+...and finally, it is possible to combine several matchers in a single route:
+
+	r.HandleFunc("/products", ProductsHandler).
+		Host("www.example.com").
+		Methods("GET").
+		Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes.
When a subrouter has a path prefix,
+the inner routes use it as a base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+	func main() {
+		var dir string
+
+		flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+		flag.Parse()
+		r := mux.NewRouter()
+
+		// This will serve files under http://localhost:8000/static/<filename>
+		r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+		srv := &http.Server{
+			Handler: r,
+			Addr:    "127.0.0.1:8000",
+			// Good practice: enforce timeouts for servers you create!
+			WriteTimeout: 15 * time.Second,
+			ReadTimeout:  15 * time.Second,
+		}
+
+		log.Fatal(srv.ListenAndServe())
+	}
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+		Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host and query value variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+		Path("/articles/{category}/{id:[0-9]+}").
+		Queries("filter", "{filter}").
+		HandlerFunc(ArticleHandler).
+		Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
+	url, err := r.Get("article").URL("subdomain", "news",
+		"category", "technology",
+		"id", "42",
+		"filter", "gorilla")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+	r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of either
+`application/json` or `application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+	// "http://news.domain.com/"
+	host, err := r.Get("article").URLHost("subdomain", "news")
+
+	// "/articles/technology/42"
+	path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+	r := mux.NewRouter()
+	s := r.Host("{subdomain}.domain.com").Subrouter()
+	s.Path("/articles/{category}/{id:[0-9]+}").
+		HandlerFunc(ArticleHandler).
+ Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +Since **vX.Y.Z**, mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed if a +match is found (including subrouters). Middlewares are defined using the de facto standard type: + + type MiddlewareFunc func(http.Handler) http.Handler + +Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). + +A very basic middleware which logs the URI of the request being handled could be written as: + + func simpleMw(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Do stuff here + log.Println(r.RequestURI) + // Call the next handler, which can be another middleware in the chain, or the final handler. + next.ServeHTTP(w, r) + }) + } + +Middlewares can be added to a router using `Router.Use()`: + + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.AddMiddleware(simpleMw) + +A more complex authentication middleware, which maps session token to users, could be written as: + + // Define our struct + type authenticationMiddleware struct { + tokenUsers map[string]string + } + + // Initialize it somewhere + func (amw *authenticationMiddleware) Populate() { + amw.tokenUsers["00000000"] = "user0" + amw.tokenUsers["aaaaaaaa"] = "userA" + amw.tokenUsers["05f717e5"] = "randomUser" + amw.tokenUsers["deadbeef"] = "user0" + } + + // Middleware function, which will be called for each request + func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get("X-Session-Token") + + if user, found := amw.tokenUsers[token]; found { + // We found the token in our map + log.Printf("Authenticated user %s\n", user) + next.ServeHTTP(w, r) + } else { + http.Error(w, "Forbidden", 403) + } + }) + } + + r := mux.NewRouter() + r.HandleFunc("/", handler) + + amw := authenticationMiddleware{} + amw.Populate() + + r.Use(amw.Middleware) + +Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. + +*/ +package mux diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go new file mode 100644 index 000000000000..8f898675ea74 --- /dev/null +++ b/vendor/github.com/gorilla/mux/middleware.go @@ -0,0 +1,28 @@ +package mux + +import "net/http" + +// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. +// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed +// to it, and then calls the handler passed as parameter to the MiddlewareFunc. +type MiddlewareFunc func(http.Handler) http.Handler + +// middleware interface is anything which implements a MiddlewareFunc named Middleware. +type middleware interface { + Middleware(handler http.Handler) http.Handler +} + +// MiddlewareFunc also implements the middleware interface. 
+func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
+	return mw(handler)
+}
+
+// Use appends a MiddlewareFunc to the chain. Middlewares can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) Use(mwf MiddlewareFunc) {
+	r.middlewares = append(r.middlewares, mwf)
+}
+
+// useInterface appends a middleware to the chain. Middlewares can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) useInterface(mw middleware) {
+	r.middlewares = append(r.middlewares, mw)
+}
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 000000000000..efabd2417511
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,585 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"regexp"
+)
+
+var (
+	ErrMethodMismatch = errors.New("method is not allowed")
+	ErrNotFound       = errors.New("no matching route was found")
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+	return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+//	var router = mux.NewRouter()
+//
+//	func main() {
+//		http.Handle("/", router)
+//	}
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+//	func init() {
+//		http.Handle("/", router)
+//	}
+//
+// This will send all incoming requests to the router.
+type Router struct {
+	// Configurable Handler to be used when no route matches.
+	NotFoundHandler http.Handler
+
+	// Configurable Handler to be used when the request method does not match the route.
+	MethodNotAllowedHandler http.Handler
+
+	// Parent route, if this is a subrouter.
+	parent parentRoute
+	// Routes to be matched, in order.
+	routes []*Route
+	// Routes by name for URL building.
+	namedRoutes map[string]*Route
+	// See Router.StrictSlash(). This defines the flag for new routes.
+	strictSlash bool
+	// See Router.SkipClean(). This defines the flag for new routes.
+	skipClean bool
+	// If true, do not clear the request context after handling the request.
+	// This has no effect when go1.7+ is used, since the context is stored
+	// on the request itself.
+	KeepContext bool
+	// See Router.UseEncodedPath(). This defines a flag for all routes.
+	useEncodedPath bool
+	// Slice of middlewares to be called after a match is found
+	middlewares []middleware
+}
+
+// Match attempts to match the given request against the router's registered routes.
+//
+// If the request matches a route of this router or one of its subrouters the Route,
+// Handler, and Vars fields of the match argument are filled and this function
+// returns true.
+//
+// If the request does not match any of this router's or its subrouters' routes
+// then this function returns false. If available, a reason for the match failure
+// will be filled in the match argument's MatchErr field. If the match failure type
+// (eg: not found) has a registered handler, the handler is assigned to the Handler
+// field of the match argument.
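+//
+// An illustrative usage sketch (the route and request here are placeholders,
+// not part of the package):
+//
+//	r := mux.NewRouter()
+//	r.HandleFunc("/products/{key}", ProductHandler)
+//	req, _ := http.NewRequest("GET", "http://example.com/products/42", nil)
+//	var match mux.RouteMatch
+//	if r.Match(req, &match) {
+//		// match.Vars["key"] == "42"
+//	}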
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + // Build middleware chain if no error was found + if match.MatchErr == nil { + for i := len(r.middlewares) - 1; i >= 0; i-- { + match.Handler = r.middlewares[i].Middleware(match.Handler) + } + } + return true + } + } + + if match.MatchErr == ErrMethodMismatch { + if r.MethodNotAllowedHandler != nil { + match.Handler = r.MethodNotAllowedHandler + return true + } else { + return false + } + } + + // Closest match for a router (includes sub-routers) + if r.NotFoundHandler != nil { + match.Handler = r.NotFoundHandler + match.MatchErr = ErrNotFound + return true + } + + match.MatchErr = ErrNotFound + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if !r.skipClean { + path := req.URL.Path + if r.useEncodedPath { + path = req.URL.EscapedPath() + } + // Clean path to canonical form and redirect. + if p := cleanPath(path); p != path { + + // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + req = setVars(req, match.Vars) + req = setCurrentRoute(req, match.Route) + } + + if handler == nil && match.MatchErr == ErrMethodMismatch { + handler = methodNotAllowedHandler() + } + + if handler == nil { + handler = http.NotFoundHandler() + } + + if !r.KeepContext { + defer contextClear(req) + } + + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.getNamedRoutes()[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.getNamedRoutes()[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will perform a redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for +// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed +// request will be made as a GET by most clients. Use middleware or client settings +// to modify this behaviour as needed. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. 
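+//
+// A hedged illustration of the redirect behaviour (handler is a placeholder):
+//
+//	r := mux.NewRouter()
+//	r.StrictSlash(true)
+//	r.Path("/path/").Handler(handler)
+//	// A request for "/path" is now answered with a 301 redirect to "/path/",
+//	// while "/path/" is dispatched to handler directly.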
+func (r *Router) StrictSlash(value bool) *Router {
+	r.strictSlash = value
+	return r
+}
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned.
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+	r.skipClean = value
+	return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+//
+// If not called, the router will match the unencoded path to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
+func (r *Router) UseEncodedPath() *Router {
+	r.useEncodedPath = true
+	return r
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+func (r *Router) getBuildScheme() string {
+	if r.parent != nil {
+		return r.parent.getBuildScheme()
+	}
+	return ""
+}
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Router) getNamedRoutes() map[string]*Route {
+	if r.namedRoutes == nil {
+		if r.parent != nil {
+			r.namedRoutes = r.parent.getNamedRoutes()
+		} else {
+			r.namedRoutes = make(map[string]*Route)
+		}
+	}
+	return r.namedRoutes
+}
+
+// getRegexpGroup returns regexp definitions from the parent route, if any.
+func (r *Router) getRegexpGroup() *routeRegexpGroup {
+	if r.parent != nil {
+		return r.parent.getRegexpGroup()
+	}
+	return nil
+}
+
+func (r *Router) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+	route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
+	r.routes = append(r.routes, route)
+	return route
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+	return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+	*http.Request)) *Route {
+	return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+	return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+	return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+	return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// BuildVarsFunc registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. +var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. +type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + if err != nil { + return err + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. +type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string + + // MatchErr is set to appropriate matching error + // It is set to ErrMethodMismatch if there is a mismatch in + // the request method and route method + MatchErr error +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := contextGet(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. 
+// This only works when called inside the handler of the matched route +// because the matched route is stored in the request context which is cleared +// after the handler returns, unless the KeepContext option is set on the +// Router. +func CurrentRoute(r *http.Request) *Route { + if rv := contextGet(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) *http.Request { + return contextSet(r, varsKey, val) +} + +func setCurrentRoute(r *http.Request, val interface{}) *http.Request { + return contextSet(r, routeKey, val) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +// checkPairs returns the count of strings passed in, and an error if +// the count is not an even number. +func checkPairs(pairs ...string) (int, error) { + length := len(pairs) + if length%2 != 0 { + return length, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + return length, nil +} + +// mapFromPairsToString converts variadic string parameters to a +// string to string map. +func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +// mapFromPairsToRegex converts variadic string parameters to a +// string to regex map. +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. 
+			valueExists := false
+			for _, value := range values {
+				if v == value {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against
+// the given regex
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != nil {
+			// If the regex is nil we only check that the key exists. Otherwise
+			// we also check that at least one of the values matches the regex.
+			valueExists := false
+			for _, value := range values {
+				if v.MatchString(value) {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// methodNotAllowed replies to the request with an HTTP status code 405.
+func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusMethodNotAllowed)
+}
+
+// methodNotAllowedHandler returns a simple request handler
+// that replies to each request with a status code 405.
+func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 000000000000..2b57e5627d54
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,332 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+type routeRegexpOptions struct {
+	strictSlash    bool
+	useEncodedPath bool
+}
+
+type regexpType int
+
+const (
+	regexpTypePath   regexpType = 0
+	regexpTypeHost   regexpType = 1
+	regexpTypePrefix regexpType = 2
+	regexpTypeQuery  regexpType = 3
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
+func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
+	// Check if it is well-formed.
+	idxs, errBraces := braceIndices(tpl)
+	if errBraces != nil {
+		return nil, errBraces
+	}
+	// Backup the original.
+	template := tpl
+	// Now let's parse it.
+	defaultPattern := "[^/]+"
+	if typ == regexpTypeQuery {
+		defaultPattern = ".*"
+	} else if typ == regexpTypeHost {
+		defaultPattern = "[^.]+"
+	}
+	// Strict slash only applies when matching a path.
+	if typ != regexpTypePath {
+		options.strictSlash = false
+	}
+	// Set a flag for strictSlash.
+ endSlash := false + if options.strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + pattern.WriteByte('^') + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) + + // Build the reverse template. + fmt.Fprintf(reverse, "%s%%s", raw) + + // Append variable name and compiled pattern. + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if options.strictSlash { + pattern.WriteString("[/]?") + } + if typ == regexpTypeQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } + if typ != regexpTypePrefix { + pattern.WriteByte('$') + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + + // Check for capturing groups which used to work in older versions + if reg.NumSubexp() != len(idxs)/2 { + panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + + "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") + } + + // Done! + return &routeRegexp{ + template: template, + regexpType: typ, + options: options, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // The type of match + regexpType regexpType + // Options for matching + options routeRegexpOptions + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if r.regexpType != regexpTypeHost { + if r.regexpType == regexpTypeQuery { + return r.matchQueryString(req) + } + path := req.URL.Path + if r.options.useEncodedPath { + path = req.URL.EscapedPath() + } + return r.regexp.MatchString(path) + } + + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. 
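+// For instance (an illustrative example), the routeRegexp built from the
+// template "/products/{key}" has the reverse template "/products/%s", so
+// url(map[string]string{"key": "42"}) returns "/products/42".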
+func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + if r.regexpType == regexpTypeQuery { + value = url.QueryEscape(value) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// getURLQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getURLQuery(req *http.Request) string { + if r.regexpType != regexpTypeQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + for key, vals := range req.URL.Query() { + if key == templateKey && len(vals) > 0 { + return key + "=" + vals[0] + } + } + return "" +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getURLQuery(req)) +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. +func braceIndices(s string) ([]int, error) { + var level, idx int + var idxs []int + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// varGroupName builds a capturing group name for the indexed variable. +func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + host := getHost(req) + matches := v.host.regexp.FindStringSubmatchIndex(host) + if len(matches) > 0 { + extractVars(host, matches, v.host.varsN, m.Vars) + } + } + path := req.URL.Path + if r.useEncodedPath { + path = req.URL.EscapedPath() + } + // Store path variables. + if v.path != nil { + matches := v.path.regexp.FindStringSubmatchIndex(path) + if len(matches) > 0 { + extractVars(path, matches, v.path.varsN, m.Vars) + // Check if we should redirect. 
+ if v.path.options.strictSlash { + p1 := strings.HasSuffix(path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), 301) + } + } + } + } + // Store query string variables. + for _, q := range v.queries { + queryURL := q.getURLQuery(req) + matches := q.regexp.FindStringSubmatchIndex(queryURL) + if len(matches) > 0 { + extractVars(queryURL, matches, q.varsN, m.Vars) + } + } +} + +// getHost tries its best to return the request host. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host + +} + +func extractVars(input string, matches []int, names []string, output map[string]string) { + for i, name := range names { + output[name] = input[matches[2*i+2]:matches[2*i+3]] + } +} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go new file mode 100644 index 000000000000..4ce098d4fbf0 --- /dev/null +++ b/vendor/github.com/gorilla/mux/route.go @@ -0,0 +1,761 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Parent where the route was registered (a Router). + parent parentRoute + // Request handler for the route. + handler http.Handler + // List of matchers. + matchers []matcher + // Manager for the variables from host and path. + regexp *routeRegexpGroup + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" + useEncodedPath bool + // The scheme used when building URLs. + buildScheme string + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error + + buildVarsFunc BuildVarsFunc +} + +func (r *Route) SkipClean() bool { + return r.skipClean +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + + var matchErr error + + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + if _, ok := m.(methodMatcher); ok { + matchErr = ErrMethodMismatch + continue + } + matchErr = nil + return false + } + } + + if matchErr != nil { + match.MatchErr = matchErr + return false + } + + if match.MatchErr == ErrMethodMismatch { + // We found a route which matches request method, clear MatchErr + match.MatchErr = nil + // Then override the mis-matched handler + match.Handler = r.handler + } + + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + + // Set variables. 
+ if r.regexp != nil { + r.regexp.setMatch(req, match, r) + } + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// If the name was registered already it will be overwritten. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.getNamedRoutes()[name] = r + } + return r +} + +// GetName returns the name for the route, if any. +func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. +func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { + if r.err != nil { + return r.err + } + r.regexp = r.getRegexpGroup() + if typ == regexpTypePath || typ == regexpTypePrefix { + if len(tpl) > 0 && tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ + strictSlash: r.strictSlash, + useEncodedPath: r.useEncodedPath, + }) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if typ == regexpTypeHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if typ == regexpTypeQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. 
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+//	r := mux.NewRouter()
+//	r.Headers("Content-Type", "application/json",
+//		"X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]string
+		headers, r.err = mapFromPairsToString(pairs...)
+		return r.addMatcher(headerMatcher(headers))
+	}
+	return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+//	r := mux.NewRouter()
+//	r.HeadersRegexp("Content-Type", "application/(text|json)",
+//		"X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match their
+// respective regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+// Use the start and end of string anchors (^ and $) to match an exact value.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]*regexp.Regexp
+		headers, r.err = mapFromPairsToRegex(pairs...)
+		return r.addMatcher(headerRegexMatcher(headers))
+	}
+	return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//	r := mux.NewRouter()
+//	r.Host("www.example.com")
+//	r.Host("{subdomain}.domain.com")
+//	r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
+	return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+// Match returns the match for a given request.
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+	return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+	return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
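+//
+// For example (ProductsHandler is an illustrative placeholder):
+//
+//	r := mux.NewRouter()
+//	r.HandleFunc("/products", ProductsHandler).Methods("GET", "POST")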
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//	r := mux.NewRouter()
+//	r.Path("/products/").Handler(ProductsHandler)
+//	r.Path("/products/{key}").Handler(ProductsHandler)
+//	r.Path("/articles/{category}/{id:[0-9]+}").
+//		Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypePath)
+	return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
+	return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//	r := mux.NewRouter()
+//	r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined query
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything (the default pattern for query values is ".*").
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		return nil
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
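+//
+// For example (SecureHandler is an illustrative placeholder):
+//
+//	r := mux.NewRouter()
+//	r.HandleFunc("/secure", SecureHandler).Schemes("https")
+//
+// Note that the first scheme in the list is also used when building URLs for
+// the route, unless a scheme was already set.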
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	if r.buildScheme == "" && len(schemes) > 0 {
+		r.buildScheme = schemes[0]
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	r.buildVarsFunc = f
+	return r
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+//	r := mux.NewRouter()
+//	s := r.Host("www.example.com").Subrouter()
+//	s.HandleFunc("/products/", ProductsHandler)
+//	s.HandleFunc("/products/{key}", ProductHandler)
+//	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	router := &Router{parent: r, strictSlash: r.strictSlash}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//	r := mux.NewRouter()
+//	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//		Name("article")
+//
+// ...a URL for it can be built using:
+//
+//	url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return a url.URL with the following path:
+//
+//	"/articles/technology/42"
+//
+// This also works for host variables:
+//
+//	r := mux.NewRouter()
+//	r.Host("{subdomain}.domain.com").
+//		HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//		Name("article")
+//
+//	// url.String() will be "http://news.domain.com/articles/technology/42"
+//	url, err := r.Get("article").URL("subdomain", "news",
+//		"category", "technology",
+//		"id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil {
+		return nil, errors.New("mux: route doesn't have a host or path")
+	}
+	values, err := r.prepareVars(pairs...)
+ if err != nil { + return nil, err + } + var scheme, host, path string + queries := make([]string, 0, len(r.regexp.queries)) + if r.regexp.host != nil { + if host, err = r.regexp.host.url(values); err != nil { + return nil, err + } + scheme = "http" + if s := r.getBuildScheme(); s != "" { + scheme = s + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(values); err != nil { + return nil, err + } + } + for _, q := range r.regexp.queries { + var query string + if query, err = q.url(values); err != nil { + return nil, err + } + queries = append(queries, query) + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + RawQuery: strings.Join(queries, "&"), + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + u := &url.URL{ + Scheme: "http", + Host: host, + } + if s := r.getBuildScheme(); s != "" { + u.Scheme = s + } + return u, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// GetPathTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp == nil || r.regexp.path == nil { + return "", errors.New("mux: route doesn't have a path") + } + return r.regexp.path.template, nil +} + +// GetPathRegexp returns the expanded regular expression used to match the route path. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathRegexp() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp == nil || r.regexp.path == nil { + return "", errors.New("mux: route does not have a path") + } + return r.regexp.path.regexp.String(), nil +} + +// GetQueriesRegexp returns the expanded regular expressions used to match the +// route queries. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not have queries.
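+// +// For example (an illustrative sketch, not part of the upstream docs): +// +// r := mux.NewRouter() +// route := r.Path("/items").Queries("id", "{id:[0-9]+}") +// regexps, err := route.GetQueriesRegexp()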
+func (r *Route) GetQueriesRegexp() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") + } + var queries []string + for _, query := range r.regexp.queries { + queries = append(queries, query.regexp.String()) + } + return queries, nil +} + +// GetQueriesTemplates returns the templates used to build the +// query matching. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define queries. +func (r *Route) GetQueriesTemplates() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") + } + var queries []string + for _, query := range r.regexp.queries { + queries = append(queries, query.template) + } + return queries, nil +} + +// GetMethods returns the methods the route matches against. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An empty list will be returned if the route does not have methods. +func (r *Route) GetMethods() ([]string, error) { + if r.err != nil { + return nil, r.err + } + for _, m := range r.matchers { + if methods, ok := m.(methodMatcher); ok { + return []string(methods), nil + } + } + return nil, nil +} + +// GetHostTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a host. +func (r *Route) GetHostTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp == nil || r.regexp.host == nil { + return "", errors.New("mux: route doesn't have a host") + } + return r.regexp.host.template, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) + if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. +type parentRoute interface { + getBuildScheme() string + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup + buildVars(map[string]string) map[string]string +} + +func (r *Route) getBuildScheme() string { + if r.buildScheme != "" { + return r.buildScheme + } + if r.parent != nil { + return r.parent.getBuildScheme() + } + return "" +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route.
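+// If the route has no regexp group of its own, it creates one, copying the +// parent's host, path and query definitions so that subrouters inherit them.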
+func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. + r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go new file mode 100644 index 000000000000..8b2c4a4c580e --- /dev/null +++ b/vendor/github.com/gorilla/mux/test_helpers.go @@ -0,0 +1,18 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import "net/http" + +// SetURLVars sets the URL variables for the given request, to be accessed via +// mux.Vars for testing route behaviour. +// +// This API should only be used for testing purposes; it provides a way to +// inject variables into the request context. Alternatively, URL variables +// can be set by making a route that captures the required variables, +// starting a server and sending the request to that server. +func SetURLVars(r *http.Request, val map[string]string) *http.Request { + return setVars(r, val) +} diff --git a/vendor/github.com/howeyc/gopass/.travis.yml b/vendor/github.com/howeyc/gopass/.travis.yml new file mode 100644 index 000000000000..cc5d509fdf5d --- /dev/null +++ b/vendor/github.com/howeyc/gopass/.travis.yml @@ -0,0 +1,11 @@ +language: go + +os: + - linux + - osx + +go: + - 1.3 + - 1.4 + - 1.5 + - tip diff --git a/vendor/github.com/howeyc/gopass/BUILD b/vendor/github.com/howeyc/gopass/BUILD new file mode 100644 index 000000000000..13c81e84512b --- /dev/null +++ b/vendor/github.com/howeyc/gopass/BUILD @@ -0,0 +1,95 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "pass.go", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "terminal.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "terminal_solaris.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "terminal.go", + ], + "//conditions:default": [], + }), + importpath = "github.com/howeyc/gopass", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ 
+ "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/howeyc/gopass/LICENSE.txt b/vendor/github.com/howeyc/gopass/LICENSE.txt new file mode 100644 index 000000000000..14f74708a4a2 --- /dev/null +++ b/vendor/github.com/howeyc/gopass/LICENSE.txt @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012 Chris Howey + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE b/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE new file mode 100644 index 000000000000..da23621dc843 --- /dev/null +++ b/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE @@ -0,0 +1,384 @@ +Unless otherwise noted, all files in this distribution are released +under the Common Development and Distribution License (CDDL). +Exceptions are noted within the associated source files. + +-------------------------------------------------------------------- + + +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE Version 1.0 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates + or contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Software, prior Modifications used by a Contributor (if any), + and the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) + Modifications, or (c) the combination of files containing + Original Software with files containing Modifications, in + each case including portions thereof. + + 1.4. "Executable" means the Covered Software in any form other + than Source Code. + + 1.5. "Initial Developer" means the individual or entity that first + makes Original Software available under this License. + + 1.6. 
"Larger Work" means a work which combines Covered Software or + portions thereof with code not governed by the terms of this + License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed + herein. + + 1.9. "Modifications" means the Source Code and Executable form of + any of the following: + + A. Any file that results from an addition to, deletion from or + modification of the contents of a file containing Original + Software or previous Modifications; + + B. Any new file that contains any part of the Original + Software or previous Modifications; or + + C. Any new file that is contributed or otherwise made + available under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable + form of computer software code that is originally released + under this License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, + process, and apparatus claims, in any patent Licensable by + grantor. + + 1.12. "Source Code" means (a) the common form of computer software + code in which modifications are made and (b) associated + documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms + of, this License. For legal entities, "You" includes any + entity which controls, is controlled by, or is under common + control with You. For purposes of this definition, + "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by + contract or otherwise, or (b) ownership of more than fifty + percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. + + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, the Initial + Developer hereby grants You a world-wide, royalty-free, + non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer, to use, + reproduce, modify, display, perform, sublicense and + distribute the Original Software (or portions thereof), + with or without Modifications, and/or as part of a Larger + Work; and + + (b) under Patent Claims infringed by the making, using or + selling of Original Software, to make, have made, use, + practice, sell, and offer for sale, and/or otherwise + dispose of the Original Software (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are + effective on the date Initial Developer first distributes + or otherwise makes the Original Software available to a + third party under the terms of this License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: (1) for code that You delete from the Original + Software, or (2) for infringements caused by: (i) the + modification of the Original Software, or (ii) the + combination of the Original Software with other software + or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, each + Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor to use, reproduce, + modify, display, perform, sublicense and distribute the + Modifications created by such Contributor (or portions + thereof), either on an unmodified basis, with other + Modifications, as Covered Software and/or as part of a + Larger Work; and + + (b) under Patent Claims infringed by the making, using, or + selling of Modifications made by that Contributor either + alone and/or in combination with its Contributor Version + (or portions of such combination), to make, use, sell, + offer for sale, have made, and/or otherwise dispose of: + (1) Modifications made by that Contributor (or portions + thereof); and (2) the combination of Modifications made by + that Contributor with its Contributor Version (or portions + of such combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are + effective on the date Contributor first distributes or + otherwise makes the Modifications available to a third + party. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: (1) for any code that Contributor has deleted + from the Contributor Version; (2) for infringements caused + by: (i) third party modifications of Contributor Version, + or (ii) the combination of Modifications made by that + Contributor with other software (except as part of the + Contributor Version) or other devices; or (3) under Patent + Claims infringed by Covered Software in the absence of + Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. + + Any Covered Software that You distribute or otherwise make + available in Executable form must also be made available in Source + Code form and that Source Code form must be distributed only under + the terms of this License. You must include a copy of this + License with every copy of the Source Code form of the Covered + Software You distribute or otherwise make available. You must + inform recipients of any such Covered Software in Executable form + as to how they can obtain such Covered Software in Source Code + form in a reasonable manner on or through a medium customarily + used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are + governed by the terms of this License. You represent that You + believe Your Modifications are Your original creation(s) and/or + You have sufficient rights to grant the rights conveyed by this + License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that + identifies You as the Contributor of the Modification. You may + not remove or alter any copyright, patent or trademark notices + contained within the Covered Software, or any notices of licensing + or any descriptive text giving attribution to any Contributor or + the Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in + Source Code form that alters or restricts the applicable version + of this License or the recipients' rights hereunder. You may + choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of + Covered Software. 
However, you may do so only on Your own behalf, + and not on behalf of the Initial Developer or any Contributor. + You must make it absolutely clear that any such warranty, support, + indemnity or liability obligation is offered by You alone, and You + hereby agree to indemnify the Initial Developer and every + Contributor for any liability incurred by the Initial Developer or + such Contributor as a result of warranty, support, indemnity or + liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software + under the terms of this License or under the terms of a license of + Your choice, which may contain terms different from this License, + provided that You are in compliance with the terms of this License + and that the license for the Executable form does not attempt to + limit or alter the recipient's rights in the Source Code form from + the rights set forth in this License. If You distribute the + Covered Software in Executable form under a different license, You + must make it absolutely clear that any terms which differ from + this License are offered by You alone, not by the Initial + Developer or Contributor. You hereby agree to indemnify the + Initial Developer and every Contributor for any liability incurred + by the Initial Developer or such Contributor as a result of any + such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with + other code not governed by the terms of this License and + distribute the Larger Work as a single product. In such a case, + You must make sure the requirements of this License are fulfilled + for the Covered Software. + +4. Versions of the License. + + 4.1. New Versions. + + Sun Microsystems, Inc. is the initial license steward and may + publish revised and/or new versions of this License from time to + time. Each version will be given a distinguishing version number. + Except as provided in Section 4.3, no one other than the license + steward has the right to modify this License. + + 4.2. Effect of New Versions. + + You may always continue to use, distribute or otherwise make the + Covered Software available under the terms of the version of the + License under which You originally received the Covered Software. + If the Initial Developer includes a notice in the Original + Software prohibiting it from being distributed or otherwise made + available under any subsequent version of the License, You must + distribute and make the Covered Software available under the terms + of the version of the License under which You originally received + the Covered Software. Otherwise, You may also choose to use, + distribute or otherwise make the Covered Software available under + the terms of any subsequent version of the License published by + the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new + license for Your Original Software, You may create and use a + modified version of this License if You: (a) rename the license + and remove any references to the name of the license steward + (except to note that the license differs from this License); and + (b) otherwise make it clear that the license contains terms which + differ from this License. + +5. DISCLAIMER OF WARRANTY. 
+ + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" + BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED + SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR + PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND + PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY + COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE + INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY + NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF + WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF + ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS + DISCLAIMER. + +6. TERMINATION. + + 6.1. This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to + cure such breach within 30 days of becoming aware of the breach. + Provisions which, by their nature, must remain in effect beyond + the termination of this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or a + Contributor (the Initial Developer or Contributor against whom You + assert such claim is referred to as "Participant") alleging that + the Participant Software (meaning the Contributor Version where + the Participant is a Contributor or the Original Software where + the Participant is the Initial Developer) directly or indirectly + infringes any patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial Developer (if + the Initial Developer is not the Participant) and all Contributors + under Sections 2.1 and/or 2.2 of this License shall, upon 60 days + notice from Participant terminate prospectively and automatically + at the expiration of such 60 day notice period, unless if within + such 60 day period You withdraw Your claim with respect to the + Participant Software against such Participant either unilaterally + or pursuant to a written agreement with Participant. + + 6.3. In the event of termination under Sections 6.1 or 6.2 above, + all end user licenses that have been validly granted by You or any + distributor hereunder prior to termination (excluding licenses + granted to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE + LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK + STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER + COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN + INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF + LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL + INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT + APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO + NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR + CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT + APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is + defined in 48 C.F.R. 2.101 (Oct. 
1995), consisting of "commercial + computer software" (as that term is defined at 48 + C.F.R. 252.227-7014(a)(1)) and "commercial computer software + documentation" as such terms are used in 48 C.F.R. 12.212 + (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 + C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all + U.S. Government End Users acquire Covered Software with only those + rights set forth herein. This U.S. Government Rights clause is in + lieu of, and supersedes, any other FAR, DFAR, or other clause or + provision that addresses Government rights in computer software + under this License. + +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed + by the law of the jurisdiction specified in a notice contained + within the Original Software (except to the extent applicable law, + if any, provides otherwise), excluding such jurisdiction's + conflict-of-law provisions. Any litigation relating to this + License shall be subject to the jurisdiction of the courts located + in the jurisdiction and venue specified in a notice contained + within the Original Software, with the losing party responsible + for costs, including, without limitation, court costs and + reasonable attorneys' fees and expenses. The application of the + United Nations Convention on Contracts for the International Sale + of Goods is expressly excluded. Any law or regulation which + provides that the language of a contract shall be construed + against the drafter shall not apply to this License. You agree + that You alone are responsible for compliance with the United + States export administration regulations (and the export control + laws and regulation of any other countries) when You use, + distribute or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or + indirectly, out of its utilization of rights under this License + and You agree to work with Initial Developer and Contributors to + distribute such responsibility on an equitable basis. Nothing + herein is intended or shall be deemed to constitute any admission + of liability. + +-------------------------------------------------------------------- + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND +DISTRIBUTION LICENSE (CDDL) + +For Covered Software in this distribution, this License shall +be governed by the laws of the State of California (excluding +conflict-of-law provisions). + +Any litigation relating to this License shall be subject to the +jurisdiction of the Federal Courts of the Northern District of +California and the state courts of the State of California, with +venue lying in Santa Clara County, California. 
diff --git a/vendor/github.com/howeyc/gopass/README.md b/vendor/github.com/howeyc/gopass/README.md new file mode 100644 index 000000000000..2d6a4e72c923 --- /dev/null +++ b/vendor/github.com/howeyc/gopass/README.md @@ -0,0 +1,27 @@ +# getpasswd in Go [![GoDoc](https://godoc.org/github.com/howeyc/gopass?status.svg)](https://godoc.org/github.com/howeyc/gopass) [![Build Status](https://secure.travis-ci.org/howeyc/gopass.png?branch=master)](http://travis-ci.org/howeyc/gopass) + +Retrieve password from user terminal or piped input without echo. + +Verified on BSD, Linux, and Windows. + +Example: +```go +package main + +import "fmt" +import "github.com/howeyc/gopass" + +func main() { + fmt.Printf("Password: ") + + // Silent. For printing *'s use gopass.GetPasswdMasked() + pass, err := gopass.GetPasswd() + if err != nil { + // Handle gopass.ErrInterrupted or getch() read error + } + + // Do something with pass +} +``` + +Caution: Multi-byte characters not supported! diff --git a/vendor/github.com/howeyc/gopass/pass.go b/vendor/github.com/howeyc/gopass/pass.go new file mode 100644 index 000000000000..f5bd5a51a8c6 --- /dev/null +++ b/vendor/github.com/howeyc/gopass/pass.go @@ -0,0 +1,110 @@ +package gopass + +import ( + "errors" + "fmt" + "io" + "os" +) + +type FdReader interface { + io.Reader + Fd() uintptr +} + +var defaultGetCh = func(r io.Reader) (byte, error) { + buf := make([]byte, 1) + if n, err := r.Read(buf); n == 0 || err != nil { + if err != nil { + return 0, err + } + return 0, io.EOF + } + return buf[0], nil +} + +var ( + maxLength = 512 + ErrInterrupted = errors.New("interrupted") + ErrMaxLengthExceeded = fmt.Errorf("maximum byte limit (%v) exceeded", maxLength) + + // Provide variable so that tests can provide a mock implementation. + getch = defaultGetCh +) + +// getPasswd returns the input read from the terminal. +// If prompt is not empty, it will be output as a prompt to the user. +// If masked is true, typing will be masked by asterisks on the screen. +// Otherwise, typing will echo nothing. +func getPasswd(prompt string, masked bool, r FdReader, w io.Writer) ([]byte, error) { + var err error + var pass, bs, mask []byte + if masked { + bs = []byte("\b \b") + mask = []byte("*") + } + + if isTerminal(r.Fd()) { + if oldState, err := makeRaw(r.Fd()); err != nil { + return pass, err + } else { + defer func() { + restore(r.Fd(), oldState) + fmt.Fprintln(w) + }() + } + } + + if prompt != "" { + fmt.Fprint(w, prompt) + } + + // Track total bytes read, not just bytes in the password. This ensures any + // errors that might flood the console with nil or -1 bytes infinitely are + // capped. + var counter int + for counter = 0; counter <= maxLength; counter++ { + if v, e := getch(r); e != nil { + err = e + break + } else if v == 127 || v == 8 { + if l := len(pass); l > 0 { + pass = pass[:l-1] + fmt.Fprint(w, string(bs)) + } + } else if v == 13 || v == 10 { + break + } else if v == 3 { + err = ErrInterrupted + break + } else if v != 0 { + pass = append(pass, v) + fmt.Fprint(w, string(mask)) + } + } + + if counter > maxLength { + err = ErrMaxLengthExceeded + } + + return pass, err +} + +// GetPasswd returns the password read from the terminal without echoing input. +// The returned byte array does not include end-of-line characters. +func GetPasswd() ([]byte, error) { + return getPasswd("", false, os.Stdin, os.Stdout) +} + +// GetPasswdMasked returns the password read from the terminal, echoing asterisks.
+// The returned byte array does not include end-of-line characters. +func GetPasswdMasked() ([]byte, error) { + return getPasswd("", true, os.Stdin, os.Stdout) +} + +// GetPasswdPrompt prompts the user and returns the password read from the terminal. +// If mask is true, then asterisks are echoed. +// The returned byte array does not include end-of-line characters. +func GetPasswdPrompt(prompt string, mask bool, r FdReader, w io.Writer) ([]byte, error) { + return getPasswd(prompt, mask, r, w) +} diff --git a/vendor/github.com/howeyc/gopass/terminal.go b/vendor/github.com/howeyc/gopass/terminal.go new file mode 100644 index 000000000000..083564146238 --- /dev/null +++ b/vendor/github.com/howeyc/gopass/terminal.go @@ -0,0 +1,25 @@ +// +build !solaris + +package gopass + +import "golang.org/x/crypto/ssh/terminal" + +type terminalState struct { + state *terminal.State +} + +func isTerminal(fd uintptr) bool { + return terminal.IsTerminal(int(fd)) +} + +func makeRaw(fd uintptr) (*terminalState, error) { + state, err := terminal.MakeRaw(int(fd)) + + return &terminalState{ + state: state, + }, err +} + +func restore(fd uintptr, oldState *terminalState) error { + return terminal.Restore(int(fd), oldState.state) +} diff --git a/vendor/github.com/howeyc/gopass/terminal_solaris.go b/vendor/github.com/howeyc/gopass/terminal_solaris.go new file mode 100644 index 000000000000..257e1b4e81ea --- /dev/null +++ b/vendor/github.com/howeyc/gopass/terminal_solaris.go @@ -0,0 +1,69 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +// Below is derived from Solaris source, so CDDL license is included. + +package gopass + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +type terminalState struct { + state *unix.Termios +} + +// isTerminal returns true if there is a terminal attached to the given +// file descriptor. +// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func isTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} + +// makeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c +func makeRaw(fd uintptr) (*terminalState, error) { + oldTermiosPtr, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + if err != nil { + return nil, err + } + oldTermios := *oldTermiosPtr + + newTermios := oldTermios + newTermios.Lflag &^= syscall.ECHO | syscall.ECHOE | syscall.ECHOK | syscall.ECHONL + if err := unix.IoctlSetTermios(int(fd), unix.TCSETS, &newTermios); err != nil { + return nil, err + } + + return &terminalState{ + state: oldTermiosPtr, + }, nil +} + +func restore(fd uintptr, oldState *terminalState) error { + return unix.IoctlSetTermios(int(fd), unix.TCSETS, oldState.state) +} diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore new file mode 100644 index 000000000000..529c3412ba95 --- /dev/null +++ b/vendor/github.com/imdario/mergo/.gitignore @@ -0,0 +1,33 @@ +#### joe made this: http://goel.io/joe + +#### go #### +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +#### vim #### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-v][a-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml new file mode 100644 index 000000000000..b13a50ed1fb8 --- /dev/null +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -0,0 +1,7 @@ +language: go +install: + - go get -t + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/BUILD b/vendor/github.com/imdario/mergo/BUILD new file mode 100644 index 000000000000..980a835ab86f --- /dev/null +++ b/vendor/github.com/imdario/mergo/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "map.go", + "merge.go", + "mergo.go", + ], + importpath = "github.com/imdario/mergo", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..469b44907a09 --- /dev/null +++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE new file mode 100644 index 000000000000..686680298da2 --- /dev/null +++ b/vendor/github.com/imdario/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md new file mode 100644 index 000000000000..4a99d31722c8 --- /dev/null +++ b/vendor/github.com/imdario/mergo/README.md @@ -0,0 +1,207 @@ +# Mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). + +[![Build Status][1]][2] +[![GoDoc][3]][4] +[![GoCard][5]][6] +[![Coverage Status][7]][8] + +[1]: https://travis-ci.org/imdario/mergo.png +[2]: https://travis-ci.org/imdario/mergo +[3]: https://godoc.org/github.com/imdario/mergo?status.svg +[4]: https://godoc.org/github.com/imdario/mergo +[5]: https://goreportcard.com/badge/imdario/mergo +[6]: https://goreportcard.com/report/github.com/imdario/mergo +[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[8]: https://coveralls.io/github/imdario/mergo?branch=master + +### Latest release + +[Release 0.3.1](https://github.com/imdario/mergo/releases/tag/0.3.1) is an important release because it changes `Merge()` and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. + +### Important note + +If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
+ +### Mergo in the wild + +- [moby/moby](https://github.com/moby/moby) +- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +- [vmware/dispatch](https://github.com/vmware/dispatch) +- [Shopify/themekit](https://github.com/Shopify/themekit) +- [imdario/zas](https://github.com/imdario/zas) +- [matcornic/hermes](https://github.com/matcornic/hermes) +- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) +- [kataras/iris](https://github.com/kataras/iris) +- [michaelsauter/crane](https://github.com/michaelsauter/crane) +- [go-task/task](https://github.com/go-task/task) +- [sensu/uchiwa](https://github.com/sensu/uchiwa) +- [ory/hydra](https://github.com/ory/hydra) +- [sisatech/vcli](https://github.com/sisatech/vcli) +- [dairycart/dairycart](https://github.com/dairycart/dairycart) +- [projectcalico/felix](https://github.com/projectcalico/felix) +- [resin-os/balena](https://github.com/resin-os/balena) +- [go-kivik/kivik](https://github.com/go-kivik/kivik) +- [Telefonica/govice](https://github.com/Telefonica/govice) +- [supergiant/supergiant](https://github.com/supergiant/supergiant) +- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) +- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) +- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) +- [EagerIO/Stout](https://github.com/EagerIO/Stout) +- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) +- [russross/canvasassignments](https://github.com/russross/canvasassignments) +- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) +- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) +- [divshot/gitling](https://github.com/divshot/gitling) +- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) +- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) +- [elwinar/rambler](https://github.com/elwinar/rambler) +- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) +- [jfbus/impressionist](https://github.com/jfbus/impressionist) +- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) +- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) +- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) +- [thoas/picfit](https://github.com/thoas/picfit) +- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) +- [jnuthong/item_search](https://github.com/jnuthong/item_search) +- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) + +## Installation + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) + +## Usage + +You can only merge same-type structs (with exported fields initialized to their type's zero value) and same-type maps. Mergo won't merge unexported (private) fields, but it will recursively merge any exported one. Maps are also merged recursively, except for structs inside maps (because they are not addressable using Go reflection). + +```go +if err := mergo.Merge(&dst, src); err != nil { + // ... +} +``` + +You can also merge with overwriting of non-empty values by passing the option `WithOverride`. + +```go +if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ...
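+ // With mergo.WithOverride, non-empty fields in src overwrite the + // corresponding non-empty fields in dst; without it, existing non-empty + // dst values are kept.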
+} +``` + +Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. + +```go +if err := mergo.Map(&dst, srcMap); err != nil { + // ... +} +``` + +Warning: if you map a struct to a map, it won't be done recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will just be assigned as values. + +More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). + +### Nice example + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" +) + +type Foo struct { + A string + B int64 +} + +func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} +} +``` + +Note: if tests are failing due to a missing package, please execute: + + go get gopkg.in/yaml.v2 + +### Transformers + +Transformers allow merging specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have an obvious zero value, but its `IsZero` method can return true because all of its fields hold zero values. How can we merge a non-zero `time.Time`? + +```go +package main + +import ( + "fmt" + "reflect" + "time" + + "github.com/imdario/mergo" +) + +type timeTransformer struct { +} + +func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil +} + +type Snapshot struct { + Time time.Time + // ... +} + +func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } +} +``` + + +## Contact me + +If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) + +## About + +Written by [Dario Castañé](http://dario.im). + +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go new file mode 100644 index 000000000000..6e9aa7baf354 --- /dev/null +++ b/vendor/github.com/imdario/mergo/doc.go @@ -0,0 +1,44 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mergo merges same-type structs and maps by setting default values in zero-value fields. + +Mergo won't merge unexported (private) fields, but it will recursively merge any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+ +Usage + +From my own work-in-progress project: + + type networkConfig struct { + Protocol string + Address string + ServerType string `json:"server_type"` + Port uint16 + } + + type FssnConfig struct { + Network networkConfig + } + + var fssnDefault = FssnConfig { + networkConfig { + "tcp", + "127.0.0.1", + "http", + 31560, + }, + } + + // Inside a function [...] + + if err := mergo.Merge(&config, fssnDefault); err != nil { + log.Fatal(err) + } + + // More code [...] + +*/ +package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go new file mode 100644 index 000000000000..20981432922f --- /dev/null +++ b/vendor/github.com/imdario/mergo/map.go @@ -0,0 +1,174 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses both values recursively, assigning src's field values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *config) (err error) { + overwrite := config.overwrite + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't.
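+ // (In practice: when the source value is addressable, its address is + // taken so that both sides are pointers before their kinds are compared + // below.)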
+				if srcKind != reflect.Ptr && srcElement.CanAddr() {
+					srcPtr := srcElement.Addr()
+					srcElement = reflect.ValueOf(srcPtr)
+					srcKind = reflect.Ptr
+				}
+			}
+
+			if !srcElement.IsValid() {
+				continue
+			}
+			if srcKind == dstKind {
+				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
+				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else if srcKind == reflect.Map {
+				if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else {
+				return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+			}
+		}
+	}
+	return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and will recursively merge
+// any exported fields.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// A key in src that doesn't match a field in dst will be skipped; this
+// doesn't apply if dst is a map.
+// This is a separate method from Merge because it is cleaner and keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}, opts ...func(*config)) error {
+	return _map(dst, src, opts...)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: Use Map(…) with WithOverride
+func MapWithOverwrite(dst, src interface{}, opts ...func(*config)) error {
+	return _map(dst, src, append(opts, WithOverride)...)
+}
+
+func _map(dst, src interface{}, opts ...func(*config)) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+	config := &config{}
+
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	// To be frictionless, we redirect equal-type arguments
+	// to deepMerge, since arguments can be anything.
+	if vSrc.Kind() == vDst.Kind() {
+		return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+	}
+	switch vSrc.Kind() {
+	case reflect.Struct:
+		if vDst.Kind() != reflect.Map {
+			return ErrExpectedMapAsDestination
+		}
+	case reflect.Map:
+		if vDst.Kind() != reflect.Struct {
+			return ErrExpectedStructAsDestination
+		}
+	default:
+		return ErrNotSupported
+	}
+	return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 000000000000..8ca10c91088e
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,219 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from the official
+// Go standard library.
+ +package mergo + +import "reflect" + +func hasExportedField(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasExportedField(dst.Field(i)) + } else { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +type config struct { + overwrite bool + transformers transformers +} + +type transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *config) (err error) { + overwrite := config.overwrite + + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + + if config.transformers != nil && !isEmptyValue(dst) { + if fn := config.transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + switch dst.Kind() { + case reflect.Struct: + if hasExportedField(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } + case reflect.Map: + if len(src.MapKeys()) == 0 && !src.IsNil() && len(dst.MapKeys()) == 0 { + dst.Set(reflect.MakeMap(dst.Type())) + return + } + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + dst.SetMapIndex(key, dstSlice) + } + } + if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { + continue + } + + if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + case reflect.Slice: + dst.Set(reflect.AppendSlice(dst, src)) + case reflect.Ptr: + fallthrough + case reflect.Interface: + if src.IsNil() { + break + } + if src.Kind() != reflect.Interface { + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if err = 
deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+				return
+			}
+		} else if dst.Elem().Type() == src.Type() {
+			if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
+				return
+			}
+		} else {
+			return ErrDifferentArgumentsTypes
+		}
+		break
+	}
+	if dst.IsNil() || overwrite {
+		if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+			dst.Set(src)
+		}
+	} else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+		return
+	}
+	default:
+		if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
+			dst.Set(src)
+		}
+	}
+	return
+}
+
+// Merge fills any empty (zero-valued) attributes on the dst struct using the corresponding
+// src attributes, if those are not themselves empty. dst and src must be valid structs of the
+// same type, and dst must be a pointer to a struct.
+// It won't merge unexported (private) fields and will recursively merge any exported fields.
+func Merge(dst, src interface{}, opts ...func(*config)) error {
+	return merge(dst, src, opts...)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*config)) error {
+	return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing you to customize how some types are merged.
+func WithTransformers(transformers transformers) func(*config) {
+	return func(config *config) {
+		config.transformers = transformers
+	}
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values.
+func WithOverride(config *config) {
+	config.overwrite = true
+}
+
+func merge(dst, src interface{}, opts ...func(*config)) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+
+	config := &config{}
+
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	if vDst.Type() != vSrc.Type() {
+		return ErrDifferentArgumentsTypes
+	}
+	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 000000000000..785618cd0785
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,92 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from the official
+// Go standard library.
+
+package mergo
+
+import (
+	"errors"
+	"reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+	ErrNilArguments                = errors.New("src and dst must not be nil")
+	ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
+	ErrNotSupported                = errors.New("only structs and maps are supported")
+	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
+	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, we must keep track of the checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it re-encounters them.
+// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + ptr uintptr + typ reflect.Type + next *visit +} + +// From src/pkg/encoding/json/encode.go. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr, reflect.Func: + return v.IsNil() + case reflect.Invalid: + return true + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + return // TODO refactor +} diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml new file mode 100644 index 000000000000..2f1ad0082bb8 --- /dev/null +++ b/vendor/github.com/imdario/mergo/testdata/license.yml @@ -0,0 +1,4 @@ +import: ../../../../fossene/db/schema/thing.yml +fields: + site: string + author: root diff --git a/vendor/github.com/jonboulle/clockwork/.gitignore b/vendor/github.com/jonboulle/clockwork/.gitignore new file mode 100644 index 000000000000..010c242bd8a9 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +*.swp diff --git a/vendor/github.com/jonboulle/clockwork/.travis.yml b/vendor/github.com/jonboulle/clockwork/.travis.yml new file mode 100644 index 000000000000..aefda90bfa81 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.3 + +sudo: false diff --git a/vendor/github.com/jonboulle/clockwork/BUILD b/vendor/github.com/jonboulle/clockwork/BUILD new file mode 100644 index 000000000000..b4661cc54e92 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/BUILD @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["clockwork.go"], + importpath = "github.com/jonboulle/clockwork", + visibility = 
["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/jonboulle/clockwork/LICENSE b/vendor/github.com/jonboulle/clockwork/LICENSE new file mode 100644 index 000000000000..5c304d1a4a7b --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md
new file mode 100644
index 000000000000..d43a6c799a0c
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/README.md
@@ -0,0 +1,61 @@
+clockwork
+=========
+
+[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork)
+[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork)
+
+A simple fake clock for Go.
+
+# Usage
+
+Replace uses of the `time` package with the `clockwork.Clock` interface.
+
+For example, instead of using `time.Sleep` directly:
+
+```go
+func myFunc() {
+	time.Sleep(3 * time.Second)
+	doSomething()
+}
+```
+
+inject a clock and use its `Sleep` method instead:
+
+```go
+func myFunc(clock clockwork.Clock) {
+	clock.Sleep(3 * time.Second)
+	doSomething()
+}
+```
+
+Now you can easily test `myFunc` with a `FakeClock`:
+
+```go
+func TestMyFunc(t *testing.T) {
+	c := clockwork.NewFakeClock()
+
+	// Start our sleepy function in the background; called directly it would
+	// block until the clock is advanced
+	go myFunc(c)
+
+	// Ensure we wait until myFunc is sleeping
+	c.BlockUntil(1)
+
+	assertState()
+
+	// Advance the FakeClock past the sleep duration
+	c.Advance(3 * time.Second)
+
+	assertState()
+}
+```
+
+and in production builds, simply inject the real clock instead:
+
+```go
+myFunc(clockwork.NewRealClock())
+```
+
+See [example_test.go](example_test.go) for a full example.
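+
+The same approach works for code that waits on `After`. A minimal sketch (the
+`poll` helper and the durations are illustrative, not part of the library;
+assumes the `time` and `testing` imports):
+
+```go
+func poll(clock clockwork.Clock, ch chan struct{}) bool {
+	select {
+	case <-ch:
+		return true
+	case <-clock.After(time.Second):
+		return false
+	}
+}
+
+func TestPollTimesOut(t *testing.T) {
+	c := clockwork.NewFakeClock()
+	done := make(chan bool)
+	go func() { done <- poll(c, make(chan struct{})) }()
+
+	// Wait until poll is parked on After, then advance past its deadline.
+	c.BlockUntil(1)
+	c.Advance(2 * time.Second)
+
+	if <-done {
+		t.Fatal("expected poll to time out")
+	}
+}
+```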
+ +# Credits + +clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking time) diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go new file mode 100644 index 000000000000..9ec96ed29688 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/clockwork.go @@ -0,0 +1,169 @@ +package clockwork + +import ( + "sync" + "time" +) + +// Clock provides an interface that packages can use instead of directly +// using the time module, so that chronology-related behavior can be tested +type Clock interface { + After(d time.Duration) <-chan time.Time + Sleep(d time.Duration) + Now() time.Time +} + +// FakeClock provides an interface for a clock which can be +// manually advanced through time +type FakeClock interface { + Clock + // Advance advances the FakeClock to a new point in time, ensuring any existing + // sleepers are notified appropriately before returning + Advance(d time.Duration) + // BlockUntil will block until the FakeClock has the given number of + // sleepers (callers of Sleep or After) + BlockUntil(n int) +} + +// NewRealClock returns a Clock which simply delegates calls to the actual time +// package; it should be used by packages in production. +func NewRealClock() Clock { + return &realClock{} +} + +// NewFakeClock returns a FakeClock implementation which can be +// manually advanced through time for testing. The initial time of the +// FakeClock will be an arbitrary non-zero time. +func NewFakeClock() FakeClock { + // use a fixture that does not fulfill Time.IsZero() + return NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC)) +} + +// NewFakeClockAt returns a FakeClock initialised at the given time.Time. +func NewFakeClockAt(t time.Time) FakeClock { + return &fakeClock{ + time: t, + } +} + +type realClock struct{} + +func (rc *realClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +func (rc *realClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +func (rc *realClock) Now() time.Time { + return time.Now() +} + +type fakeClock struct { + sleepers []*sleeper + blockers []*blocker + time time.Time + + l sync.RWMutex +} + +// sleeper represents a caller of After or Sleep +type sleeper struct { + until time.Time + done chan time.Time +} + +// blocker represents a caller of BlockUntil +type blocker struct { + count int + ch chan struct{} +} + +// After mimics time.After; it waits for the given duration to elapse on the +// fakeClock, then sends the current time on the returned channel. +func (fc *fakeClock) After(d time.Duration) <-chan time.Time { + fc.l.Lock() + defer fc.l.Unlock() + now := fc.time + done := make(chan time.Time, 1) + if d.Nanoseconds() == 0 { + // special case - trigger immediately + done <- now + } else { + // otherwise, add to the set of sleepers + s := &sleeper{ + until: now.Add(d), + done: done, + } + fc.sleepers = append(fc.sleepers, s) + // and notify any blockers + fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) + } + return done +} + +// notifyBlockers notifies all the blockers waiting until the +// given number of sleepers are waiting on the fakeClock. It +// returns an updated slice of blockers (i.e. 
those still waiting) +func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) { + for _, b := range blockers { + if b.count == count { + close(b.ch) + } else { + newBlockers = append(newBlockers, b) + } + } + return +} + +// Sleep blocks until the given duration has passed on the fakeClock +func (fc *fakeClock) Sleep(d time.Duration) { + <-fc.After(d) +} + +// Time returns the current time of the fakeClock +func (fc *fakeClock) Now() time.Time { + fc.l.RLock() + t := fc.time + fc.l.RUnlock() + return t +} + +// Advance advances fakeClock to a new point in time, ensuring channels from any +// previous invocations of After are notified appropriately before returning +func (fc *fakeClock) Advance(d time.Duration) { + fc.l.Lock() + defer fc.l.Unlock() + end := fc.time.Add(d) + var newSleepers []*sleeper + for _, s := range fc.sleepers { + if end.Sub(s.until) >= 0 { + s.done <- end + } else { + newSleepers = append(newSleepers, s) + } + } + fc.sleepers = newSleepers + fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) + fc.time = end +} + +// BlockUntil will block until the fakeClock has the given number of sleepers +// (callers of Sleep or After) +func (fc *fakeClock) BlockUntil(n int) { + fc.l.Lock() + // Fast path: current number of sleepers is what we're looking for + if len(fc.sleepers) == n { + fc.l.Unlock() + return + } + // Otherwise, set up a new blocker + b := &blocker{ + count: n, + ch: make(chan struct{}), + } + fc.blockers = append(fc.blockers, b) + fc.l.Unlock() + <-b.ch +} diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml new file mode 100644 index 000000000000..955dc0be5fa6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "output_tests/.*" + diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore new file mode 100644 index 000000000000..15556530a854 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -0,0 +1,4 @@ +/vendor +/bug_test.go +/coverage.txt +/.idea diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml new file mode 100644 index 000000000000..449e67cd01ac --- /dev/null +++ b/vendor/github.com/json-iterator/go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.8.x + - 1.x + +before_install: + - go get -t -v ./... 
+ +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/json-iterator/go/BUILD b/vendor/github.com/json-iterator/go/BUILD new file mode 100644 index 000000000000..375b54b8bc50 --- /dev/null +++ b/vendor/github.com/json-iterator/go/BUILD @@ -0,0 +1,64 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "feature_adapter.go", + "feature_any.go", + "feature_any_array.go", + "feature_any_bool.go", + "feature_any_float.go", + "feature_any_int32.go", + "feature_any_int64.go", + "feature_any_invalid.go", + "feature_any_nil.go", + "feature_any_number.go", + "feature_any_object.go", + "feature_any_string.go", + "feature_any_uint32.go", + "feature_any_uint64.go", + "feature_config.go", + "feature_config_with_sync_map.go", + "feature_config_without_sync_map.go", + "feature_iter.go", + "feature_iter_array.go", + "feature_iter_float.go", + "feature_iter_int.go", + "feature_iter_object.go", + "feature_iter_skip.go", + "feature_iter_skip_strict.go", + "feature_iter_string.go", + "feature_json_number.go", + "feature_pool.go", + "feature_reflect.go", + "feature_reflect_array.go", + "feature_reflect_extension.go", + "feature_reflect_map.go", + "feature_reflect_native.go", + "feature_reflect_object.go", + "feature_reflect_optional.go", + "feature_reflect_slice.go", + "feature_reflect_struct_decoder.go", + "feature_stream.go", + "feature_stream_float.go", + "feature_stream_int.go", + "feature_stream_string.go", + "jsoniter.go", + ], + importpath = "github.com/json-iterator/go", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 000000000000..f34f5b4ad1c8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,33 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/google/gofuzz" + packages = ["."] + revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/testify" + packages = ["assert","require"] + revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" + version = "v1.1.4" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "f8b7cf3941d3792cbbd570bb53c093adaf774334d1162c651565c97a58dc9d09" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 000000000000..0ac55ef876ac --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,33 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/davecgh/go-spew" + version = "1.1.0" + +[[constraint]] + branch = "master" + name = "github.com/google/gofuzz" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.1.4" diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 000000000000..2cf4f5ab28e9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 000000000000..3a0d680983b9
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,86 @@
+[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
+[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
+[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
+[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
+[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
+[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
+
+A high-performance, 100% compatible drop-in replacement for "encoding/json"
+
+```
+Go developers, please join us: Didi Chuxing Platform Technology Department, taowen@didichuxing.com
+```
+
+# Benchmark
+
+![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
+
+Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
+
+Raw results (easyjson requires static code generation):
+
+| | ns/op | allocation bytes | allocation times |
+| --- | --- | --- | --- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+
+# Usage
+
+100% compatibility with the standard library.
+
+Replace
+
+```go
+import "encoding/json"
+json.Marshal(&data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Marshal(&data)
+```
+
+Replace
+
+```go
+import "encoding/json"
+json.Unmarshal(input, &data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Unmarshal(input, &data)
+```
+
+[More documentation](http://jsoniter.com/migrate-from-go-std.html)
+
+# How to get
+
+```
+go get github.com/json-iterator/go
+```
+
+# Contributions Welcome!
+
+Contributors
+
+* [thockin](https://github.com/thockin)
+* [mattn](https://github.com/mattn)
+* [cch123](https://github.com/cch123)
+* [Oleg Shaldybin](https://github.com/olegshaldybin)
+* [Jason Toffaletti](https://github.com/toffaletti)
+
+Report an issue, open a pull request, email taowen@gmail.com, or join the chat: [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
new file mode 100755
index 000000000000..b45ef688313e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/build.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+set -x
+
+if [ !
-d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/feature_adapter.go b/vendor/github.com/json-iterator/go/feature_adapter.go new file mode 100644 index 000000000000..e0ab94807530 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_adapter.go @@ -0,0 +1,138 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +func lastNotSpacePos(data []byte) int { + for i := len(data) - 1; i >= 0; i-- { + if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' { + return i + 1 + } + } + return 0 +} + +// UnmarshalFromString convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? 
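+// It reports whether the decoder's input buffer still holds unread bytes.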
+func (adapter *Decoder) More() bool { + return adapter.iter.head != adapter.iter.tail +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber for number JSON element, use float64 or json.NumberValue (alias of string) +func (adapter *Decoder) UseNumber() { + origCfg := adapter.iter.cfg.configBeforeFrozen + origCfg.UseNumber = true + adapter.iter.cfg = origCfg.Froze().(*frozenConfig) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + adapter.stream.cfg.indentionStep = len(indent) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.Froze().(*frozenConfig) +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/feature_any.go b/vendor/github.com/json-iterator/go/feature_any.go new file mode 100644 index 000000000000..87716d1fcf2e --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any.go @@ -0,0 +1,245 @@ +package jsoniter + +import ( + "errors" + "fmt" + "io" + "reflect" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. 
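+//
+// A minimal sketch of typical use (the path elements are illustrative):
+//
+//	name := jsoniter.Get(data, "user", "name").ToString()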
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + // TODO: add Set + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + return WrapUint64(uint64(val.(uint))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
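+// For objects, arrays and numbers, only the raw bytes are captured here; parsing is deferred until the Any is accessed.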
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_array.go b/vendor/github.com/json-iterator/go/feature_any_array.go new file mode 100644 index 000000000000..0449e9aa428a --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) 
+ if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_bool.go b/vendor/github.com/json-iterator/go/feature_any_bool.go new file mode 100644 index 000000000000..9452324af5b1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/feature_any_float.go b/vendor/github.com/json-iterator/go/feature_any_float.go new file mode 100644 index 000000000000..35fdb09497fa --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) 
ToInt32() int32 { + return int32(any.val) +} + +func (any *floatAny) ToInt64() int64 { + return int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_int32.go b/vendor/github.com/json-iterator/go/feature_any_int32.go new file mode 100644 index 000000000000..1b56f399150d --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_int64.go b/vendor/github.com/json-iterator/go/feature_any_int64.go new file mode 100644 index 000000000000..c440d72b6d3a --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any 
*int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_invalid.go b/vendor/github.com/json-iterator/go/feature_any_invalid.go new file mode 100644 index 000000000000..1d859eac3274 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/feature_any_nil.go b/vendor/github.com/json-iterator/go/feature_any_nil.go new file mode 100644 index 000000000000..d04cb54c11c1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/feature_any_number.go b/vendor/github.com/json-iterator/go/feature_any_number.go new file mode 100644 index 000000000000..bf8d024626b5 --- /dev/null +++ 
b/vendor/github.com/json-iterator/go/feature_any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "unsafe" + "io" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_object.go b/vendor/github.com/json-iterator/go/feature_any_object.go new file mode 100644 index 000000000000..c44ef5c989a4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any 
*objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 
0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) + if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
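+				// Mirrors the array wildcard: entries whose remaining path does
+				// not resolve are dropped from the result map.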
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_string.go b/vendor/github.com/json-iterator/go/feature_any_string.go new file mode 100644 index 000000000000..abf060bd59ae --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_string.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + endPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + endPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint32.go b/vendor/github.com/json-iterator/go/feature_any_uint32.go new file mode 100644 index 000000000000..656bbd33d7ee --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint64.go b/vendor/github.com/json-iterator/go/feature_any_uint64.go new file mode 100644 index 000000000000..7df2fce33ba9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + 
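+	// Fully materialized values carry no unparsed buffer, so Parse is a no-op.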
return nil +} + +func (any *uint64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_config.go b/vendor/github.com/json-iterator/go/feature_config.go new file mode 100644 index 000000000000..2d2fc8cd08ed --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_config.go @@ -0,0 +1,296 @@ +package jsoniter + +import ( + "encoding/json" + "errors" + "io" + "reflect" + "unsafe" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +// Froze forge API from config +func (cfg Config) Froze() API { + // TODO: cache frozen config + frozenConfig := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + streamPool: make(chan *Stream, 16), + iteratorPool: make(chan *Iterator, 16), + } + frozenConfig.initCache() + if cfg.MarshalFloatWith6Digits { + frozenConfig.marshalFloatWith6Digits() + } + if cfg.EscapeHTML { + frozenConfig.escapeHTML() + } + if cfg.UseNumber { + frozenConfig.useNumber() + } + if cfg.ValidateJsonRawMessage { + frozenConfig.validateJsonRawMessage() + } + frozenConfig.configBeforeFrozen = cfg + return frozenConfig +} + +func (cfg *frozenConfig) validateJsonRawMessage() { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + iter.Read() + if iter.Error != nil { + stream.WriteRaw("null") + } else { + cfg.ReturnIterator(iter) + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return false + }} + cfg.addEncoderToCache(reflect.TypeOf((*json.RawMessage)(nil)).Elem(), encoder) + cfg.addEncoderToCache(reflect.TypeOf((*RawMessage)(nil)).Elem(), encoder) +} + +func (cfg *frozenConfig) useNumber() { + cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = 
json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }}) +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extensions = append(cfg.extensions, extension) +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits() { + // for better performance + cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{}) + cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{}) +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML() { + cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{}) +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.Froze().Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str 
string, v interface{}) error { + data := []byte(str) + data = data[:lastNotSpacePos(data)] + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + if iter.head == iter.tail { + iter.loadMore() + } + if iter.Error == io.EOF { + return nil + } + if iter.Error == nil { + iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal") + } + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + data = data[:lastNotSpacePos(data)] + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + typ := reflect.TypeOf(v) + if typ.Kind() != reflect.Ptr { + // return non-pointer error + return errors.New("the second param must be ptr type") + } + iter.ReadVal(v) + if iter.head == iter.tail { + iter.loadMore() + } + if iter.Error == io.EOF { + return nil + } + if iter.Error == nil { + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + } + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/feature_config_with_sync_map.go b/vendor/github.com/json-iterator/go/feature_config_with_sync_map.go new file mode 100644 index 000000000000..e81e1b7f750e --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_config_with_sync_map.go @@ -0,0 +1,51 @@ +//+build go1.9 + +package jsoniter + +import ( + "reflect" + "sync" +) + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + decoderCache sync.Map + encoderCache sync.Map + extensions []Extension + streamPool chan *Stream + iteratorPool chan *Iterator +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = sync.Map{} + cfg.encoderCache = sync.Map{} +} + + +func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder { + decoder, found := cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} \ No newline at end of file diff --git a/vendor/github.com/json-iterator/go/feature_config_without_sync_map.go b/vendor/github.com/json-iterator/go/feature_config_without_sync_map.go new file mode 100644 index 000000000000..f01f971e0f92 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_config_without_sync_map.go @@ -0,0 +1,54 @@ +//+build !go1.9 + +package jsoniter + +import ( + "reflect" + "sync" +) + +type frozenConfig struct { + configBeforeFrozen 
Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + cacheLock *sync.RWMutex + decoderCache map[reflect.Type]ValDecoder + encoderCache map[reflect.Type]ValEncoder + extensions []Extension + streamPool chan *Stream + iteratorPool chan *Iterator +} + +func (cfg *frozenConfig) initCache() { + cfg.cacheLock = &sync.RWMutex{} + cfg.decoderCache = map[reflect.Type]ValDecoder{} + cfg.encoderCache = map[reflect.Type]ValEncoder{} +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) { + cfg.cacheLock.Lock() + cfg.decoderCache[cacheKey] = decoder + cfg.cacheLock.Unlock() +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) { + cfg.cacheLock.Lock() + cfg.encoderCache[cacheKey] = encoder + cfg.cacheLock.Unlock() +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder { + cfg.cacheLock.RLock() + decoder, _ := cfg.decoderCache[cacheKey].(ValDecoder) + cfg.cacheLock.RUnlock() + return decoder +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder { + cfg.cacheLock.RLock() + encoder, _ := cfg.encoderCache[cacheKey].(ValEncoder) + cfg.cacheLock.RUnlock() + return encoder +} diff --git a/vendor/github.com/json-iterator/go/feature_iter.go b/vendor/github.com/json-iterator/go/feature_iter.go new file mode 100644 index 000000000000..95ae54fbfe4d --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter.go @@ -0,0 +1,322 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. 
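+// A minimal usage sketch built only from APIs in this package (assumes
+// "fmt", "io" and "log" are imported); io.EOF alone means clean end of input:
+//
+//	iter := ParseString(ConfigDefault, `[1,2,3]`)
+//	for iter.ReadArray() {
+//		fmt.Println(iter.ReadInt())
+//	}
+//	if iter.Error != nil && iter.Error != io.EOF {
+//		log.Fatal(iter.Error)
+//	}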
+type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. 
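+// Only the first non-EOF error is retained; the message embeds up to 50
+// bytes of input on either side of the failure position for context.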
+func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. +func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_array.go b/vendor/github.com/json-iterator/go/feature_iter_array.go new file mode 100644 index 000000000000..6188cb4577ab --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_array.go @@ -0,0 +1,58 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
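+// A JSON null in place of an array is tolerated and behaves like an empty
+// array: the first call returns false.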
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + return false + } + return true + } + return true + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_float.go b/vendor/github.com/json-iterator/go/feature_iter_float.go new file mode 100644 index 000000000000..4f883c0959f7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_float.go @@ -0,0 +1,347 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty number") + return + case dotInNumber: + 
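+		// The JSON grammar forbids a bare leading '.', e.g. ".5".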
iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < 
iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_int.go b/vendor/github.com/json-iterator/go/feature_iter_int.go new file mode 100644 index 000000000000..4781c63933cf --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_int.go @@ -0,0 +1,339 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func 
(iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + 
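+ // the unrolled lookups above fold up to eight digits into value without
+ // overflow checks (eight decimal digits always fit in a uint32); any
+ // remaining digits are handled by the overflow-checked loop below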
iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if 
value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_object.go b/vendor/github.com/json-iterator/go/feature_iter_object.go new file mode 100644 index 000000000000..dfd91fa60d95 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_object.go @@ -0,0 +1,267 @@ +package jsoniter + +import ( + "fmt" + "unicode" + "unsafe" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. +func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + if iter.cfg.objectFieldMustBeSimpleString { + return string(iter.readObjectFieldAsBytes()) + } else { + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + if iter.cfg.objectFieldMustBeSimpleString { + return string(iter.readObjectFieldAsBytes()) + } else { + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +func (iter *Iterator) readFieldHash() int32 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c == '"' { + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if !iter.cfg.objectFieldMustBeSimpleString && b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return int32(hash) + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return int32(hash) + } + if 'A' <= b && b <= 'Z' { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } + } + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 +} + +func calcHash(str string) int32 { + hash := int64(0x811c9dc5) + for _, b := range str { + hash ^= int64(unicode.ToLower(b)) + hash *= 0x1000193 + } + return int32(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var fieldBytes []byte + var field string + 
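+ // '{' starts an object and 'n' must spell null; every field name is handed
+ // to the callback, which returns false to stop iteration early.
+ // A minimal usage sketch (names illustrative, values assumed to be strings):
+ //
+ //     iter.ReadObjectCB(func(it *Iterator, field string) bool {
+ //         fmt.Println(field, it.ReadString())
+ //         return true // keep iterating
+ //     })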
if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git 
a/vendor/github.com/json-iterator/go/feature_iter_skip.go b/vendor/github.com/json-iterator/go/feature_iter_skip.go new file mode 100644 index 000000000000..f58beb9137bf --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip.go @@ -0,0 +1,129 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +type captureBuffer struct { + startedAt int + captured []byte +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = make([]byte, 0, 32) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + if len(captured) == 0 { + copied := make([]byte, len(remaining)) + copy(copied, remaining) + return copied + } + captured = append(captured, remaining...) 
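+ // append whatever still sits in the current buffer to the bytes captured
+ // across earlier buffer refills, so the caller receives one contiguous copy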
+ return captured +} + +// Skip skips a json object and positions at the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go new file mode 100644 index 000000000000..8fcdc3b69bdf --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go @@ -0,0 +1,144 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster implementation, does not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + case ']': // If close symbol, decrease level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipArray", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + case '}': // If close symbol, decrease level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", 
"incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go new file mode 100644 index 000000000000..f67bc2e83151 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go @@ -0,0 +1,89 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import "fmt" + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + iter.ReadFloat32() + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return 
true + }) +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_string.go b/vendor/github.com/json-iterator/go/feature_iter_string.go new file mode 100644 index 000000000000..adc487ea8048 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_string.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
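+// A minimal sketch of the intended use (illustrative; copy immediately if
+// the bytes must survive the next read):
+//
+//     b := iter.ReadStringAsSlice()
+//     key := string(b) // copy now: b may point into the iterator's buffer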
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. + switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/feature_json_number.go b/vendor/github.com/json-iterator/go/feature_json_number.go new file mode 100644 index 000000000000..e187b200a9cd --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_json_number.go @@ -0,0 +1,31 @@ +package jsoniter + +import ( + "encoding/json" + "strconv" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. 
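+// For example, Number("42").Int64() yields 42, while Number("3.14").Int64()
+// returns strconv's parse error, matching the behaviour of json.Number.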
+func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} diff --git a/vendor/github.com/json-iterator/go/feature_pool.go b/vendor/github.com/json-iterator/go/feature_pool.go new file mode 100644 index 000000000000..52d38e685540 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_pool.go @@ -0,0 +1,59 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + select { + case stream := <-cfg.streamPool: + stream.Reset(writer) + return stream + default: + return NewStream(cfg, writer, 512) + } +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.Error = nil + stream.Attachment = nil + select { + case cfg.streamPool <- stream: + return + default: + return + } +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + select { + case iter := <-cfg.iteratorPool: + iter.ResetBytes(data) + return iter + default: + return ParseBytes(cfg, data) + } +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + select { + case cfg.iteratorPool <- iter: + return + default: + return + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect.go b/vendor/github.com/json-iterator/go/feature_reflect.go new file mode 100644 index 000000000000..75d533b0719d --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect.go @@ -0,0 +1,607 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "fmt" + "reflect" + "time" + "unsafe" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
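+//
+// A hypothetical ValEncoder that writes a time.Time as a unix timestamp only
+// needs these three methods; an illustrative sketch of its Encode half:
+//
+//     func (e *unixTimeEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+//         stream.WriteInt64((*time.Time)(ptr).Unix())
+//     }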
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) + EncodeInterface(val interface{}, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +// WriteToStream the default implementation for TypeEncoder method EncodeInterface +func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) { + e := (*emptyInterface)(unsafe.Pointer(&val)) + if e.word == nil { + stream.WriteNil() + return + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +var jsonNumberType reflect.Type +var jsoniterNumberType reflect.Type +var jsonRawMessageType reflect.Type +var jsoniterRawMessageType reflect.Type +var anyType reflect.Type +var marshalerType reflect.Type +var unmarshalerType reflect.Type +var textMarshalerType reflect.Type +var textUnmarshalerType reflect.Type + +func init() { + jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem() + jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem() + jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem() + jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem() + anyType = reflect.TypeOf((*Any)(nil)).Elem() + marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + typ := reflect.TypeOf(obj) + cacheKey := typ.Elem() + decoder := decoderOfType(iter.cfg, "", cacheKey) + e := (*emptyInterface)(unsafe.Pointer(&obj)) + if e.word == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(e.word, iter) +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + typ := reflect.TypeOf(val) + cacheKey := typ + encoder := encoderOfType(stream.cfg, "", cacheKey) + encoder.EncodeInterface(val, stream) +} + +func decoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { + cacheKey := typ + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + decoder = getTypeDecoderFromExtension(cfg, typ) + if decoder != nil { + cfg.addDecoderToCache(cacheKey, decoder) + return decoder + } + decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey} + cfg.addDecoderToCache(cacheKey, decoder) + decoder = createDecoderOfType(cfg, prefix, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + for _, extension := range cfg.extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func createDecoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { + typeName := typ.String() + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + if typ.Implements(unmarshalerType) { + templateInterface := 
reflect.New(typ).Elem().Interface() + var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} + if typ.Kind() == reflect.Ptr { + decoder = &OptionalDecoder{typ.Elem(), decoder} + } + return decoder + } + if reflect.PtrTo(typ).Implements(unmarshalerType) { + templateInterface := reflect.New(typ).Interface() + var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} + return decoder + } + if typ.Implements(textUnmarshalerType) { + templateInterface := reflect.New(typ).Elem().Interface() + var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} + if typ.Kind() == reflect.Ptr { + decoder = &OptionalDecoder{typ.Elem(), decoder} + } + return decoder + } + if reflect.PtrTo(typ).Implements(textUnmarshalerType) { + templateInterface := reflect.New(typ).Interface() + var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} + return decoder + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(cfg, prefix, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + if typ.Implements(anyType) { + return &anyCodec{} + } + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*int)(nil)).Elem()) + } + return &intCodec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uint)(nil)).Elem()) + } + return &uintCodec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uintptr)(nil)).Elem()) + } + return &uintptrCodec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*bool)(nil)).Elem()) + } + return &boolCodec{} + case reflect.Interface: + if typ.NumMethod() == 0 { + return 
&emptyInterfaceCodec{} + } + return &nonEmptyInterfaceCodec{} + case reflect.Struct: + return decoderOfStruct(cfg, prefix, typ) + case reflect.Array: + return decoderOfArray(cfg, prefix, typ) + case reflect.Slice: + return decoderOfSlice(cfg, prefix, typ) + case reflect.Map: + return decoderOfMap(cfg, prefix, typ) + case reflect.Ptr: + return decoderOfOptional(cfg, prefix, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", prefix, typ.String())} + } +} + +func encoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + cacheKey := typ + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + encoder = getTypeEncoderFromExtension(cfg, typ) + if encoder != nil { + cfg.addEncoderToCache(cacheKey, encoder) + return encoder + } + encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey} + cfg.addEncoderToCache(cacheKey, encoder) + encoder = createEncoderOfType(cfg, prefix, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + for _, extension := range cfg.extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +func createEncoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(cfg, typ) + templateInterface := reflect.New(typ).Elem().Interface() + var encoder ValEncoder = &marshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + if typ.Kind() == reflect.Ptr { + encoder = &OptionalEncoder{encoder} + } + return encoder + } + if reflect.PtrTo(typ).Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(cfg, reflect.PtrTo(typ)) + templateInterface := reflect.New(typ).Interface() + var encoder ValEncoder = &marshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(cfg, typ) + templateInterface := reflect.New(typ).Elem().Interface() + var encoder ValEncoder = &textMarshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + if typ.Kind() == reflect.Ptr { + encoder = &OptionalEncoder{encoder} + } + return encoder + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + return &base64Codec{} + } + if typ.Implements(anyType) { + return &anyCodec{} + } + return createEncoderOfSimpleType(cfg, prefix, typ) +} + +func createCheckIsEmpty(cfg *frozenConfig, typ reflect.Type) checkIsEmpty { + kind := typ.Kind() + switch kind { + case reflect.String: + return &stringCodec{} + case reflect.Int: + return &intCodec{} + case reflect.Int8: + return &int8Codec{} + case reflect.Int16: + return &int16Codec{} + case reflect.Int32: + return &int32Codec{} + case reflect.Int64: + return &int64Codec{} + case reflect.Uint: + return &uintCodec{} + case reflect.Uint8: + return &uint8Codec{} + case reflect.Uint16: + return &uint16Codec{} + case reflect.Uint32: + return &uint32Codec{} + case reflect.Uintptr: + return &uintptrCodec{} + case 
reflect.Uint64: + return &uint64Codec{} + case reflect.Float32: + return &float32Codec{} + case reflect.Float64: + return &float64Codec{} + case reflect.Bool: + return &boolCodec{} + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{} + } + return &nonEmptyInterfaceCodec{} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(cfg, "", typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func createEncoderOfSimpleType(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*int)(nil)).Elem()) + } + return &intCodec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uint)(nil)).Elem()) + } + return &uintCodec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uintptr)(nil)).Elem()) + } + return &uintptrCodec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*bool)(nil)).Elem()) + } + return &boolCodec{} + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{} + } + return &nonEmptyInterfaceCodec{} + case reflect.Struct: + return encoderOfStruct(cfg, prefix, typ) + case reflect.Array: + return encoderOfArray(cfg, prefix, typ) + case reflect.Slice: + return encoderOfSlice(cfg, prefix, typ) + case reflect.Map: + return encoderOfMap(cfg, prefix, typ) + case reflect.Ptr: + return encoderOfOptional(cfg, prefix, typ) + default: + return 
&lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", prefix, typ.String())} + } +} + +type placeholderEncoder struct { + cfg *frozenConfig + cacheKey reflect.Type +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.getRealEncoder().Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) { + encoder.getRealEncoder().EncodeInterface(val, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.getRealEncoder().IsEmpty(ptr) +} + +func (encoder *placeholderEncoder) getRealEncoder() ValEncoder { + for i := 0; i < 500; i++ { + realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey) + _, isPlaceholder := realDecoder.(*placeholderEncoder) + if isPlaceholder { + time.Sleep(10 * time.Millisecond) + } else { + return realDecoder + } + } + panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey)) +} + +type placeholderDecoder struct { + cfg *frozenConfig + cacheKey reflect.Type +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + for i := 0; i < 500; i++ { + realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey) + _, isPlaceholder := realDecoder.(*placeholderDecoder) + if isPlaceholder { + time.Sleep(10 * time.Millisecond) + } else { + realDecoder.Decode(ptr, iter) + return + } + } + panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey)) +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) EncodeInterface(val interface{}, stream *Stream) { + if val == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +func extractInterface(val interface{}) emptyInterface { + return *((*emptyInterface)(unsafe.Pointer(&val))) +} + +// emptyInterface is the header for an interface{} value. 
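+// It mirrors the runtime's eface layout, which is what lets extractInterface
+// above pull the data word out of an interface{} with a single unsafe cast
+// instead of going through reflect.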
+type emptyInterface struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +// emptyInterface is the header for an interface with method (not interface{}) +type nonEmptyInterface struct { + // see ../runtime/iface.go:/Itab + itab *struct { + ityp unsafe.Pointer // static interface type + typ unsafe.Pointer // dynamic concrete type + link unsafe.Pointer + bad int32 + unused int32 + fun [100000]unsafe.Pointer // method table + } + word unsafe.Pointer +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_array.go b/vendor/github.com/json-iterator/go/feature_reflect_array.go new file mode 100644 index 000000000000..a6dd91cab822 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_array.go @@ -0,0 +1,110 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +func decoderOfArray(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { + decoder := decoderOfType(cfg, prefix+"[array]->", typ.Elem()) + return &arrayDecoder{typ, typ.Elem(), decoder} +} + +func encoderOfArray(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + if typ.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(cfg, prefix+"[array]->", typ.Elem()) + if typ.Elem().Kind() == reflect.Map { + encoder = &OptionalEncoder{encoder} + } + return &arrayEncoder{typ, typ.Elem(), encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) + encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) { + // special optimization for interface{} + e := (*emptyInterface)(unsafe.Pointer(&val)) + if e.word == nil { + stream.WriteArrayStart() + stream.WriteNil() + stream.WriteArrayEnd() + return + } + elemType := encoder.arrayType.Elem() + if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) { + ptr := uintptr(e.word) + e.word = unsafe.Pointer(&ptr) + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + offset := 
uintptr(0) + iter.ReadArrayCB(func(iter *Iterator) bool { + if offset < decoder.arrayType.Size() { + decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter) + offset += decoder.elemType.Size() + } else { + iter.Skip() + } + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_extension.go b/vendor/github.com/json-iterator/go/feature_reflect_extension.go new file mode 100644 index 000000000000..66083a521839 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_extension.go @@ -0,0 +1,424 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + onePtrEmbedded bool + onePtrOptimization bool + Type reflect.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field *reflect.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
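+//
+// Embedding DummyExtension (below) and overriding one method is the usual
+// pattern; an illustrative sketch that upper-cases every emitted field name:
+//
+//     type upperCaseExtension struct{ DummyExtension }
+//
+//     func (e *upperCaseExtension) UpdateStructDescriptor(sd *StructDescriptor) {
+//         for _, binding := range sd.Fields {
+//             binding.ToNames = []string{strings.ToUpper(binding.ToNames[0])}
+//         }
+//     }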
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateDecoder(typ reflect.Type) ValDecoder + CreateEncoder(typ reflect.Type) ValEncoder + DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + 
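+ // entries are keyed "<type>/<field>", the same fmt.Sprintf("%s/%s", ...)
+ // cache key describeStruct uses when it looks field codecs up again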
fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(cfg, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + for _, extension := range cfg.extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + for _, extension := range cfg.extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder := typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + decoder := typeDecoders[typ.Elem().String()] + if decoder != nil { + return &OptionalDecoder{typ.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(cfg, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + for _, extension := range cfg.extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + for _, extension := range cfg.extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder := typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + encoder := typeEncoders[typ.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(cfg *frozenConfig, prefix string, typ reflect.Type) *StructDescriptor { + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + tag, hastag := field.Tag.Lookup(cfg.getTagKey()) + if cfg.onlyTaggedField && !hastag { + continue + } + tagParts := strings.Split(tag, ",") + if tag == "-" { + continue + } + if field.Anonymous && (tag == "" || tagParts[0] == "") { + if field.Type.Kind() == reflect.Struct { + structDescriptor := describeStruct(cfg, prefix, field.Type) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(cfg, prefix, field.Type.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
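				// For fields promoted from an embedded *struct, each inherited binding
				// is rewrapped below: dereferenceEncoder/dereferenceDecoder follow the
				// pointer, and structFieldEncoder/structFieldDecoder re-apply the outer
				// field's offset, mirroring Go's field promotion through embedded pointers.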
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{field.Type.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + fieldNames := calcFieldNames(field.Name, tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(cfg, prefix+typ.String()+"."+field.Name+"->", field.Type) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(cfg, prefix+typ.String()+"."+field.Name+"->", field.Type) + // map is stored as pointer in the struct, + // and treat nil or empty map as empty field + if encoder != nil && field.Type.Kind() == reflect.Map { + encoder = &optionalMapEncoder{encoder} + } + } + binding := &Binding{ + Field: &field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(cfg, typ, bindings, embeddedBindings) +} +func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + onePtrEmbedded := false + onePtrOptimization := false + if typ.NumField() == 1 { + firstField := typ.Field(0) + switch firstField.Type.Kind() { + case reflect.Ptr: + if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct { + onePtrEmbedded = true + } + fallthrough + case reflect.Map: + onePtrOptimization = true + case reflect.Struct: + onePtrOptimization = isStructOnePtr(firstField.Type) + } + } + structDescriptor := &StructDescriptor{ + onePtrEmbedded: onePtrEmbedded, + onePtrOptimization: onePtrOptimization, + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + for _, extension := range cfg.extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, cfg) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +func isStructOnePtr(typ reflect.Type) bool { + if typ.NumField() == 1 { + firstField := typ.Field(0) + switch firstField.Type.Kind() { + case reflect.Ptr: + return true + case reflect.Map: + return true + case reflect.Struct: + return isStructOnePtr(firstField.Type) + } + } + return false +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == 
"omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type.Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? + var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_map.go b/vendor/github.com/json-iterator/go/feature_reflect_map.go new file mode 100644 index 000000000000..f537d026a4aa --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_map.go @@ -0,0 +1,260 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "reflect" + "sort" + "strconv" + "unsafe" +) + +func decoderOfMap(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { + decoder := decoderOfType(cfg, prefix+"[map]->", typ.Elem()) + mapInterface := reflect.New(typ).Interface() + return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)} +} + +func encoderOfMap(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + elemType := typ.Elem() + encoder := encoderOfType(cfg, prefix+"[map]->", elemType) + mapInterface := reflect.New(typ).Elem().Interface() + if cfg.sortMapKeys { + return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))} + } + return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))} +} + +type mapDecoder struct { + mapType reflect.Type + keyType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder + mapInterface emptyInterface +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + // dark magic to cast unsafe.Pointer back to interface{} using reflect.Type + mapInterface := decoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface).Elem() + if iter.ReadNil() { + realVal.Set(reflect.Zero(decoder.mapType)) + return + } + if realVal.IsNil() { + realVal.Set(reflect.MakeMap(realVal.Type())) + } + iter.ReadMapCB(func(iter *Iterator, keyStr string) bool { + elem := reflect.New(decoder.elemType) + decoder.elemDecoder.Decode(extractInterface(elem.Interface()).word, iter) + // to put into map, we have to use reflection + keyType := decoder.keyType + // TODO: remove this from loop + switch { + case keyType.Kind() == reflect.String: + realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem()) + return true + case keyType.Implements(textUnmarshalerType): + textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler) + err := textUnmarshaler.UnmarshalText([]byte(keyStr)) + if err != nil { + iter.ReportError("read map key as 
TextUnmarshaler", err.Error()) + return false + } + realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem()) + return true + case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler) + err := textUnmarshaler.UnmarshalText([]byte(keyStr)) + if err != nil { + iter.ReportError("read map key as TextUnmarshaler", err.Error()) + return false + } + realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem()) + return true + default: + switch keyType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(keyStr, 10, 64) + if err != nil || reflect.Zero(keyType).OverflowInt(n) { + iter.ReportError("read map key as int64", "read int64 failed") + return false + } + realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(keyStr, 10, 64) + if err != nil || reflect.Zero(keyType).OverflowUint(n) { + iter.ReportError("read map key as uint64", "read uint64 failed") + return false + } + realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) + return true + } + } + iter.ReportError("read map key", "unexpected map key type "+keyType.String()) + return true + }) +} + +type mapEncoder struct { + mapType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder + mapInterface emptyInterface +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + stream.WriteObjectStart() + for i, key := range realVal.MapKeys() { + if i != 0 { + stream.WriteMore() + } + encodeMapKey(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + val := realVal.MapIndex(key).Interface() + encoder.elemEncoder.EncodeInterface(val, stream) + } + stream.WriteObjectEnd() +} + +func encodeMapKey(key reflect.Value, stream *Stream) { + if key.Kind() == reflect.String { + stream.WriteString(key.String()) + return + } + if tm, ok := key.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + if err != nil { + stream.Error = err + return + } + stream.writeByte('"') + stream.Write(buf) + stream.writeByte('"') + return + } + switch key.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + stream.writeByte('"') + stream.WriteInt64(key.Int()) + stream.writeByte('"') + return + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + stream.writeByte('"') + stream.WriteUint64(key.Uint()) + stream.writeByte('"') + return + } + stream.Error = &json.UnsupportedTypeError{Type: key.Type()} +} + +func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + return realVal.Len() == 0 +} + +type sortKeysMapEncoder struct { + mapType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder + mapInterface emptyInterface +} + +func (encoder *sortKeysMapEncoder) 
Encode(ptr unsafe.Pointer, stream *Stream) { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + + // Extract and sort the keys. + keys := realVal.MapKeys() + sv := stringValues(make([]reflectWithString, len(keys))) + for i, v := range keys { + sv[i].v = v + if err := sv[i].resolve(); err != nil { + stream.Error = err + return + } + } + sort.Sort(sv) + + stream.WriteObjectStart() + for i, key := range sv { + if i != 0 { + stream.WriteMore() + } + stream.WriteVal(key.s) // might need html escape, so can not WriteString directly + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + val := realVal.MapIndex(key.v).Interface() + encoder.elemEncoder.EncodeInterface(val, stream) + } + stream.WriteObjectEnd() +} + +// stringValues is a slice of reflect.Value holding *reflect.StringValue. +// It implements the methods to sort by string. +type stringValues []reflectWithString + +type reflectWithString struct { + v reflect.Value + s string +} + +func (w *reflectWithString) resolve() error { + if w.v.Kind() == reflect.String { + w.s = w.v.String() + return nil + } + if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + w.s = string(buf) + return err + } + switch w.v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + w.s = strconv.FormatInt(w.v.Int(), 10) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + w.s = strconv.FormatUint(w.v.Uint(), 10) + return nil + } + return &json.UnsupportedTypeError{Type: w.v.Type()} +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s } + +func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + return realVal.Len() == 0 +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_native.go b/vendor/github.com/json-iterator/go/feature_reflect_native.go new file mode 100644 index 000000000000..464a8f7a613b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_native.go @@ -0,0 +1,789 @@ +package jsoniter + +import ( + "encoding" + "encoding/base64" + "encoding/json" + "reflect" + "unsafe" +) + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type intCodec struct { +} + +func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int)(ptr)) = iter.ReadInt() + } +} + +func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt(*((*int)(ptr))) 
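	// Every native codec in this file follows the same shape: Decode writes
	// through the raw pointer, Encode reads through it, and IsEmpty reports
	// the zero value so omitempty can skip the field. Because Decode checks
	// ReadNil first, a JSON null leaves the existing value untouched.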
+} + +func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int)(ptr)) == 0 +} + +type uintptrCodec struct { +} + +func (codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) + } +} + +func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(uint64(*((*uintptr)(ptr)))) +} + +func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uintptr)(ptr)) == 0 +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uintCodec struct { +} + +func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint)(ptr)) = iter.ReadUint() + return + } +} + +func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint(*((*uint)(ptr))) +} + +func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec 
*uint8Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type emptyInterfaceCodec struct { +} + +func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + existing := *((*interface{})(ptr)) + + // Checking for both typed and untyped nil pointers. 
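	// The branch below runs only when the interface holds a non-nil pointer.
	// The loop unwraps nested pointers so that a JSON null can nil out the
	// innermost pointer in place, while any other value is decoded into the
	// existing allocation via iter.ReadVal rather than replacing the interface.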
+ if existing != nil && + reflect.TypeOf(existing).Kind() == reflect.Ptr && + !reflect.ValueOf(existing).IsNil() { + + var ptrToExisting interface{} + for { + elem := reflect.ValueOf(existing).Elem() + if elem.Kind() != reflect.Ptr || elem.IsNil() { + break + } + ptrToExisting = existing + existing = elem.Interface() + } + + if iter.ReadNil() { + if ptrToExisting != nil { + nilPtr := reflect.Zero(reflect.TypeOf(ptrToExisting).Elem()) + reflect.ValueOf(ptrToExisting).Elem().Set(nilPtr) + } else { + *((*interface{})(ptr)) = nil + } + } else { + iter.ReadVal(existing) + } + + return + } + + if iter.ReadNil() { + *((*interface{})(ptr)) = nil + } else { + *((*interface{})(ptr)) = iter.Read() + } +} + +func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteVal(*((*interface{})(ptr))) +} + +func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteVal(val) +} + +func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { + emptyInterface := (*emptyInterface)(ptr) + return emptyInterface.typ == nil +} + +type nonEmptyInterfaceCodec struct { +} + +func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NilValue { + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*interface{})(ptr)) = nil + return + } + nonEmptyInterface := (*nonEmptyInterface)(ptr) + if nonEmptyInterface.itab == nil { + iter.ReportError("read non-empty interface", "do not know which concrete type to decode to") + return + } + var i interface{} + e := (*emptyInterface)(unsafe.Pointer(&i)) + e.typ = nonEmptyInterface.itab.typ + e.word = nonEmptyInterface.word + iter.ReadVal(&i) + if e.word == nil { + nonEmptyInterface.itab = nil + } + nonEmptyInterface.word = e.word +} + +func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + var i interface{} + if nonEmptyInterface.itab != nil { + e := (*emptyInterface)(unsafe.Pointer(&i)) + e.typ = nonEmptyInterface.itab.typ + e.word = nonEmptyInterface.word + } + stream.WriteVal(i) +} + +func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteVal(val) +} + +func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + return nonEmptyInterface.word == nil +} + +type anyCodec struct { +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*Any)(ptr)) = iter.ReadAny() +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + (*((*Any)(ptr))).WriteTo(stream) +} + +func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) { + (val.(Any)).WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + return (*((*Any)(ptr))).Size() == 0 +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.WriteRaw("0") + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream 
*Stream) { + number := val.(json.Number) + if len(number) == 0 { + stream.WriteRaw("0") + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.WriteRaw("0") + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) { + number := val.(Number) + if len(number) == 0 { + stream.WriteRaw("0") + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(json.RawMessage))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(RawMessage))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} + +type base64Codec struct { + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + ptrSlice := (*sliceHeader)(ptr) + ptrSlice.Len = 0 + ptrSlice.Cap = 0 + ptrSlice.Data = nil + return + } + switch iter.WhatIsNext() { + case StringValue: + encoding := base64.StdEncoding + src := iter.SkipAndReturnBytes() + src = src[1 : len(src)-1] + decodedLen := encoding.DecodedLen(len(src)) + dst := make([]byte, decodedLen) + len, err := encoding.Decode(dst, src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + dst = dst[:len] + dstSlice := (*sliceHeader)(unsafe.Pointer(&dst)) + ptrSlice := (*sliceHeader)(ptr) + ptrSlice.Data = dstSlice.Data + ptrSlice.Cap = dstSlice.Cap + ptrSlice.Len = dstSlice.Len + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + toGrow := encoding.EncodedLen(len(src)) + 
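	// The base64 text is written directly into the stream's buffer: ensure
	// grows it by the exact encoded length, Encode fills it in place, and n
	// advances past the new bytes, avoiding an intermediate []byte.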
stream.ensure(toGrow) + encoding.Encode(stream.buf[stream.n:], src) + stream.n += toGrow + stream.writeByte('"') +} + +func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) { + ptr := extractInterface(val).word + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + toGrow := encoding.EncodedLen(len(src)) + stream.ensure(toGrow) + encoding.Encode(stream.buf[stream.n:], src) + stream.n += toGrow + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type marshalerEncoder struct { + templateInterface emptyInterface + checkIsEmpty checkIsEmpty +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + templateInterface := encoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + marshaler, ok := (*realInterface).(json.Marshaler) + if !ok { + stream.WriteVal(nil) + return + } + + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} +func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + templateInterface emptyInterface + 
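	// checkIsEmpty supplies the IsEmpty answer used for omitempty; its
	// concrete implementation is constructed outside this hunk, since
	// MarshalText alone offers no cheap test for the zero value.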
checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + templateInterface := encoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + marshaler := (*realInterface).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + stream.WriteString(string(bytes)) + } +} + +func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + templateInterface emptyInterface +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + templateInterface := decoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + unmarshaler := (*realInterface).(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + templateInterface emptyInterface +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + templateInterface := decoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + unmarshaler := (*realInterface).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_object.go b/vendor/github.com/json-iterator/go/feature_reflect_object.go new file mode 100644 index 000000000000..036545cbe37b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_object.go @@ -0,0 +1,195 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "strings" + "unsafe" +) + +func encoderOfStruct(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(cfg, prefix, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, structDescriptor.onePtrEmbedded, + structDescriptor.onePtrOptimization, finalOrderedFields} +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag.Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != "" + if newTagged { 
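		// The rules below resolve two bindings that map to the same JSON name:
		// the binding at the shallower embedding depth (shorter levels) wins,
		// and two candidates at the same depth knock each other out (both
		// ignored), similar in spirit to encoding/json's conflict handling.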
+ if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +func decoderOfStruct(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(cfg, prefix, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + return createStructDecoder(typ, fields) +} + +type structFieldEncoder struct { + field *reflect.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +type structEncoder struct { + typ reflect.Type + onePtrEmbedded bool + onePtrOptimization bool + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) EncodeInterface(val interface{}, stream *Stream) { + e := (*emptyInterface)(unsafe.Pointer(&val)) + if encoder.onePtrOptimization { + if e.word == nil && encoder.onePtrEmbedded { + stream.WriteObjectStart() + stream.WriteObjectEnd() + return + } + ptr := uintptr(e.word) + e.word = unsafe.Pointer(&ptr) + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, 
stream, encoder)
+}
+
+func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return false
+}
diff --git a/vendor/github.com/json-iterator/go/feature_reflect_optional.go b/vendor/github.com/json-iterator/go/feature_reflect_optional.go
new file mode 100644
index 000000000000..fe7055fcd805
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_reflect_optional.go
@@ -0,0 +1,124 @@
+package jsoniter
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+func decoderOfOptional(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder {
+	elemType := typ.Elem()
+	decoder := decoderOfType(cfg, prefix, elemType)
+	return &OptionalDecoder{elemType, decoder}
+}
+
+func encoderOfOptional(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder {
+	elemType := typ.Elem()
+	elemEncoder := encoderOfType(cfg, prefix, elemType)
+	encoder := &OptionalEncoder{elemEncoder}
+	if elemType.Kind() == reflect.Map {
+		encoder = &OptionalEncoder{encoder}
+	}
+	return encoder
+}
+
+type OptionalDecoder struct {
+	ValueType    reflect.Type
+	ValueDecoder ValDecoder
+}
+
+func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if iter.ReadNil() {
+		*((*unsafe.Pointer)(ptr)) = nil
+	} else {
+		if *((*unsafe.Pointer)(ptr)) == nil {
+			// pointer is nil; allocate memory to hold the value
+			value := reflect.New(decoder.ValueType)
+			newPtr := extractInterface(value.Interface()).word
+			decoder.ValueDecoder.Decode(newPtr, iter)
+			*((*uintptr)(ptr)) = uintptr(newPtr)
+		} else {
+			// reuse the existing instance
+			decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+		}
+	}
+}
+
+type dereferenceDecoder struct {
+	// used only to dereference a pointer
+	valueType    reflect.Type
+	valueDecoder ValDecoder
+}
+
+func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if *((*unsafe.Pointer)(ptr)) == nil {
+		// pointer is nil; allocate memory to hold the value
+		value := reflect.New(decoder.valueType)
+		newPtr := extractInterface(value.Interface()).word
+		decoder.valueDecoder.Decode(newPtr, iter)
+		*((*uintptr)(ptr)) = uintptr(newPtr)
+	} else {
+		// reuse the existing instance
+		decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+	}
+}
+
+type OptionalEncoder struct {
+	ValueEncoder ValEncoder
+}
+
+func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if *((*unsafe.Pointer)(ptr)) == nil {
+		stream.WriteNil()
+	} else {
+		encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+	}
+}
+
+func (encoder *OptionalEncoder) EncodeInterface(val interface{}, stream *Stream) {
+	WriteToStream(val, stream, encoder)
+}
+
+func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*unsafe.Pointer)(ptr)) == nil
+}
+
+type dereferenceEncoder struct {
+	ValueEncoder ValEncoder
+}
+
+func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if *((*unsafe.Pointer)(ptr)) == nil {
+		stream.WriteNil()
+	} else {
+		encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+	}
+}
+
+func (encoder *dereferenceEncoder) EncodeInterface(val interface{}, stream *Stream) {
+	WriteToStream(val, stream, encoder)
+}
+
+func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.ValueEncoder.IsEmpty(*((*unsafe.Pointer)(ptr)))
+}
+
+type optionalMapEncoder struct {
+	valueEncoder ValEncoder
+}
+
+func (encoder *optionalMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if *((*unsafe.Pointer)(ptr)) == nil {
+		stream.WriteNil()
+	} else {
+		encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+	}
+}
+
+func (encoder *optionalMapEncoder) EncodeInterface(val interface{}, stream *Stream) {
+	WriteToStream(val, stream, encoder)
+}
+
+func (encoder *optionalMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	p := *((*unsafe.Pointer)(ptr))
+	return p == nil || encoder.valueEncoder.IsEmpty(p)
+}
diff --git a/vendor/github.com/json-iterator/go/feature_reflect_slice.go b/vendor/github.com/json-iterator/go/feature_reflect_slice.go
new file mode 100644
index 000000000000..078b3bc078e9
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_reflect_slice.go
@@ -0,0 +1,143 @@
+package jsoniter
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"unsafe"
+)
+
+func decoderOfSlice(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder {
+	decoder := decoderOfType(cfg, prefix+"[slice]->", typ.Elem())
+	return &sliceDecoder{typ, typ.Elem(), decoder}
+}
+
+func encoderOfSlice(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder {
+	encoder := encoderOfType(cfg, prefix+"[slice]->", typ.Elem())
+	if typ.Elem().Kind() == reflect.Map {
+		encoder = &OptionalEncoder{encoder}
+	}
+	return &sliceEncoder{typ, typ.Elem(), encoder}
+}
+
+type sliceEncoder struct {
+	sliceType   reflect.Type
+	elemType    reflect.Type
+	elemEncoder ValEncoder
+}
+
+func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	slice := (*sliceHeader)(ptr)
+	if slice.Data == nil {
+		stream.WriteNil()
+		return
+	}
+	if slice.Len == 0 {
+		stream.WriteEmptyArray()
+		return
+	}
+	stream.WriteArrayStart()
+	elemPtr := unsafe.Pointer(slice.Data)
+	encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
+	for i := 1; i < slice.Len; i++ {
+		stream.WriteMore()
+		elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size())
+		encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
+	}
+	stream.WriteArrayEnd()
+	if stream.Error != nil && stream.Error != io.EOF {
+		stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
+	}
+}
+
+func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) {
+	WriteToStream(val, stream, encoder)
+}
+
+func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	slice := (*sliceHeader)(ptr)
+	return slice.Len == 0
+}
+
+type sliceDecoder struct {
+	sliceType   reflect.Type
+	elemType    reflect.Type
+	elemDecoder ValDecoder
+}
+
+// sliceHeader is a safe version of SliceHeader used within this package.
+type sliceHeader struct {
+	Data unsafe.Pointer
+	Len  int
+	Cap  int
+}
+
+func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.doDecode(ptr, iter)
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
+	}
+}
+
+func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+	slice := (*sliceHeader)(ptr)
+	if iter.ReadNil() {
+		slice.Len = 0
+		slice.Cap = 0
+		slice.Data = nil
+		return
+	}
+	reuseSlice(slice, decoder.sliceType, 4)
+	slice.Len = 0
+	offset := uintptr(0)
+	iter.ReadArrayCB(func(iter *Iterator) bool {
+		growOne(slice, decoder.sliceType, decoder.elemType)
+		decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter)
+		offset += decoder.elemType.Size()
+		return true
+	})
+}
+
+// growOne grows the slice by one element, allocating more capacity when
+// needed: capacity doubles while the length is below 1024 and grows by
+// about 25% after that, similar to the runtime's append growth.
+func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) { + newLen := slice.Len + 1 + if newLen <= slice.Cap { + slice.Len = newLen + return + } + newCap := slice.Cap + if newCap == 0 { + newCap = 1 + } else { + for newCap < newLen { + if slice.Len < 1024 { + newCap += newCap + } else { + newCap += newCap / 4 + } + } + } + newVal := reflect.MakeSlice(sliceType, newLen, newCap).Interface() + newValPtr := extractInterface(newVal).word + dst := (*sliceHeader)(newValPtr).Data + // copy old array into new array + originalBytesCount := slice.Len * int(elementType.Size()) + srcSliceHeader := (unsafe.Pointer)(&sliceHeader{slice.Data, originalBytesCount, originalBytesCount}) + dstSliceHeader := (unsafe.Pointer)(&sliceHeader{dst, originalBytesCount, originalBytesCount}) + copy(*(*[]byte)(dstSliceHeader), *(*[]byte)(srcSliceHeader)) + slice.Data = dst + slice.Len = newLen + slice.Cap = newCap +} + +func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) { + if expectedCap <= slice.Cap { + return + } + newVal := reflect.MakeSlice(sliceType, 0, expectedCap).Interface() + newValPtr := extractInterface(newVal).word + dst := (*sliceHeader)(newValPtr).Data + slice.Data = dst + slice.Cap = expectedCap +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go new file mode 100644 index 000000000000..d30804855beb --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go @@ -0,0 +1,966 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "strings" + "unsafe" +) + +func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) ValDecoder { + knownHash := map[int32]struct{}{ + 0: {}, + } + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int32 + var fieldHash2 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, 
fieldDecoder3} + case 4: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var 
fieldName6 int32 + var fieldName7 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldName9 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 
*structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldName9 int32 + var fieldName10 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields} +} + +type generalStructDecoder struct { + typ reflect.Type + fields map[string]*structFieldDecoder +} + +func (decoder *generalStructDecoder) 
Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + var fieldBytes []byte + var field string + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + } + fieldDecoder := decoder.fields[strings.ToLower(field)] + if fieldDecoder == nil { + iter.Skip() + } else { + fieldDecoder.Decode(ptr, iter) + } + for iter.nextToken() == ',' { + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + } + fieldDecoder = decoder.fields[strings.ToLower(field)] + if fieldDecoder == nil { + iter.Skip() + } else { + fieldDecoder.Decode(ptr, iter) + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type skipObjectDecoder struct { + typ reflect.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect.Type + fieldHash int32 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type twoFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type threeFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, 
iter.Error.Error()) + } +} + +type fourFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type fiveFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type sixFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type sevenFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 
*structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type eightFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder + fieldHash8 int32 + fieldDecoder8 *structFieldDecoder +} + +func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type nineFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder + fieldHash8 int32 + fieldDecoder8 *structFieldDecoder + fieldHash9 int32 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + 
decoder.fieldDecoder7.Decode(ptr, iter)
+		case decoder.fieldHash8:
+			decoder.fieldDecoder8.Decode(ptr, iter)
+		case decoder.fieldHash9:
+			decoder.fieldDecoder9.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type tenFieldsStructDecoder struct {
+	typ reflect.Type
+	fieldHash1 int32
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2 int32
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3 int32
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4 int32
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5 int32
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6 int32
+	fieldDecoder6 *structFieldDecoder
+	fieldHash7 int32
+	fieldDecoder7 *structFieldDecoder
+	fieldHash8 int32
+	fieldDecoder8 *structFieldDecoder
+	fieldHash9 int32
+	fieldDecoder9 *structFieldDecoder
+	fieldHash10 int32
+	fieldDecoder10 *structFieldDecoder
+}
+
+func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		case decoder.fieldHash8:
+			decoder.fieldDecoder8.Decode(ptr, iter)
+		case decoder.fieldHash9:
+			decoder.fieldDecoder9.Decode(ptr, iter)
+		case decoder.fieldHash10:
+			decoder.fieldDecoder10.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type structFieldDecoder struct {
+	field *reflect.StructField
+	fieldDecoder ValDecoder
+}
+
+func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset)
+	decoder.fieldDecoder.Decode(fieldPtr, iter)
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error())
+	}
+}
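The decoder constructors above all follow one pattern: hash every field name up front and fall back to the map-based generalStructDecoder as soon as two names collide, since a hash-keyed specialized decoder could no longer tell those fields apart. They also treat 0 as an unfilled slot, which quietly assumes calcHash never returns 0 for a real field name. Below is a minimal standalone sketch of the collision check; calcHashDemo and pickDecoder are illustrative stand-ins, not the vendored implementation, whose hash function may differ.

package main

import "fmt"

// calcHashDemo is an FNV-1a-style hash truncated to int32, for illustration only.
func calcHashDemo(s string) int32 {
	h := uint32(2166136261) // FNV offset basis
	for _, c := range []byte(s) {
		h ^= uint32(c)
		h *= 16777619 // FNV prime
	}
	return int32(h)
}

// pickDecoder mirrors the dispatch choice: a specialized decoder when all
// field-name hashes are distinct, the general decoder on any collision.
func pickDecoder(fields []string) string {
	seen := map[int32]bool{}
	for _, f := range fields {
		h := calcHashDemo(f)
		if seen[h] {
			return "generalStructDecoder"
		}
		seen[h] = true
	}
	return fmt.Sprintf("%d-field specialized decoder", len(fields))
}

func main() {
	fmt.Println(pickDecoder([]string{"id", "name", "count"})) // 3-field specialized decoder
}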
diff --git a/vendor/github.com/json-iterator/go/feature_stream.go b/vendor/github.com/json-iterator/go/feature_stream.go
new file mode 100644
index 000000000000..97355eb5b70c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_stream.go
@@ -0,0 +1,308 @@
+package jsoniter
+
+import (
+	"io"
+)
+
+// Stream is an io.Writer-like object, with JSON-specific write functions.
+// Error is not returned as a return value, but stored as the Error member on this stream instance.
+type Stream struct {
+	cfg *frozenConfig
+	out io.Writer
+	buf []byte
+	n int
+	Error error
+	indention int
+	Attachment interface{} // open for customized encoder
+}
+
+// NewStream creates a new Stream instance.
+// cfg can be jsoniter.ConfigDefault.
+// out can be nil if writing to the internal buffer.
+// bufSize is the initial size for the internal buffer in bytes.
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
+	return &Stream{
+		cfg: cfg.(*frozenConfig),
+		out: out,
+		buf: make([]byte, bufSize),
+		n: 0,
+		Error: nil,
+		indention: 0,
+	}
+}
+
+// Pool returns a pool that can provide more streams with the same configuration.
+func (stream *Stream) Pool() StreamPool {
+	return stream.cfg
+}
+
+// Reset reuses this stream instance by assigning a new writer.
+func (stream *Stream) Reset(out io.Writer) {
+	stream.out = out
+	stream.n = 0
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (stream *Stream) Available() int {
+	return len(stream.buf) - stream.n
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (stream *Stream) Buffered() int {
+	return stream.n
+}
+
+// Buffer returns the bytes written so far; if the writer is nil, use this method to take the result.
+func (stream *Stream) Buffer() []byte {
+	return stream.buf[:stream.n]
+}
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (stream *Stream) Write(p []byte) (nn int, err error) {
+	for len(p) > stream.Available() && stream.Error == nil {
+		if stream.out == nil {
+			stream.growAtLeast(len(p))
+		} else {
+			var n int
+			if stream.Buffered() == 0 {
+				// Large write, empty buffer.
+				// Write directly from p to avoid copy.
+				n, stream.Error = stream.out.Write(p)
+			} else {
+				n = copy(stream.buf[stream.n:], p)
+				stream.n += n
+				stream.Flush()
+			}
+			nn += n
+			p = p[n:]
+		}
+	}
+	if stream.Error != nil {
+		return nn, stream.Error
+	}
+	n := copy(stream.buf[stream.n:], p)
+	stream.n += n
+	nn += n
+	return nn, nil
+}
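// Editorial aside, not part of the vendored file: a quick usage sketch of the
// Stream API added above. With a nil writer the output accumulates in the
// internal buffer and is read back via Buffer(). The import path follows the
// vendored directory layout shown in this diff; upstream the package lives at
// github.com/json-iterator/go.
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// nil writer, 64-byte initial buffer: encode into memory.
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, nil, 64)
	stream.WriteObjectStart()
	stream.WriteObjectField("name")
	stream.WriteString("demo")
	stream.WriteMore()
	stream.WriteObjectField("count")
	stream.WriteInt(3)
	stream.WriteObjectEnd()
	fmt.Println(string(stream.Buffer())) // {"name":"demo","count":3}
}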
+
+// writeByte writes a single byte.
+func (stream *Stream) writeByte(c byte) {
+	if stream.Error != nil {
+		return
+	}
+	if stream.Available() < 1 {
+		stream.growAtLeast(1)
+	}
+	stream.buf[stream.n] = c
+	stream.n++
+}
+
+func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
+	if stream.Error != nil {
+		return
+	}
+	if stream.Available() < 2 {
+		stream.growAtLeast(2)
+	}
+	stream.buf[stream.n] = c1
+	stream.buf[stream.n+1] = c2
+	stream.n += 2
+}
+
+func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
+	if stream.Error != nil {
+		return
+	}
+	if stream.Available() < 3 {
+		stream.growAtLeast(3)
+	}
+	stream.buf[stream.n] = c1
+	stream.buf[stream.n+1] = c2
+	stream.buf[stream.n+2] = c3
+	stream.n += 3
+}
+
+func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
+	if stream.Error != nil {
+		return
+	}
+	if stream.Available() < 4 {
+		stream.growAtLeast(4)
+	}
+	stream.buf[stream.n] = c1
+	stream.buf[stream.n+1] = c2
+	stream.buf[stream.n+2] = c3
+	stream.buf[stream.n+3] = c4
+	stream.n += 4
+}
+
+func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
+	if stream.Error != nil {
+		return
+	}
+	if stream.Available() < 5 {
+		stream.growAtLeast(5)
+	}
+	stream.buf[stream.n] = c1
+	stream.buf[stream.n+1] = c2
+	stream.buf[stream.n+2] = c3
+	stream.buf[stream.n+3] = c4
+	stream.buf[stream.n+4] = c5
+	stream.n += 5
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (stream *Stream) Flush() error {
+	if stream.out == nil {
+		return nil
+	}
+	if stream.Error != nil {
+		return stream.Error
+	}
+	if stream.n == 0 {
+		return nil
+	}
+	n, err := stream.out.Write(stream.buf[0:stream.n])
+	if n < stream.n && err == nil {
+		err = io.ErrShortWrite
+	}
+	if err != nil {
+		if n > 0 && n < stream.n {
+			copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n])
+		}
+		stream.n -= n
+		stream.Error = err
+		return err
+	}
+	stream.n = 0
+	return nil
+}
+
+func (stream *Stream) ensure(minimal int) {
+	available := stream.Available()
+	if available < minimal {
+		stream.growAtLeast(minimal)
+	}
+}
+
+func (stream *Stream) growAtLeast(minimal int) {
+	if stream.out != nil {
+		stream.Flush()
+		if stream.Available() >= minimal {
+			return
+		}
+	}
+	toGrow := len(stream.buf)
+	if toGrow < minimal {
+		toGrow = minimal
+	}
+	newBuf := make([]byte, len(stream.buf)+toGrow)
+	copy(newBuf, stream.Buffer())
+	stream.buf = newBuf
+}
+
+// WriteRaw write string out without quotes, just like []byte
+func (stream *Stream) WriteRaw(s string) {
+	stream.ensure(len(s))
+	if stream.Error != nil {
+		return
+	}
+	n := copy(stream.buf[stream.n:], s)
+	stream.n += n
+}
+
+// WriteNil write null to stream
+func (stream *Stream) WriteNil() {
+	stream.writeFourBytes('n', 'u', 'l', 'l')
+}
+
+// WriteTrue write true to stream
+func (stream *Stream) WriteTrue() {
+	stream.writeFourBytes('t', 'r', 'u', 'e')
+}
+
+// WriteFalse write false to stream
+func (stream *Stream) WriteFalse() {
+	stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
+}
+
+// WriteBool write true or false into stream
+func (stream *Stream) WriteBool(val bool) {
+	if val {
+		stream.WriteTrue()
+	} else {
+		stream.WriteFalse()
+	}
+}
+
+// WriteObjectStart write { with possible indention
+func (stream *Stream) WriteObjectStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('{')
+	stream.writeIndention(0)
+}
+
+// WriteObjectField write "field": with possible indention
+func (stream *Stream) WriteObjectField(field string) {
+	stream.WriteString(field)
+	if stream.indention > 0 {
+		stream.writeTwoBytes(':', ' ')
+	} else {
+		stream.writeByte(':')
+	}
+}
+
+// WriteObjectEnd write } with possible indention
+func (stream *Stream) WriteObjectEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte('}')
+}
+
+// WriteEmptyObject write {}
+func (stream *Stream) WriteEmptyObject() {
+	stream.writeByte('{')
+	stream.writeByte('}')
+}
+
+// WriteMore write , with possible indention
+func (stream *Stream) WriteMore() {
+	stream.writeByte(',')
+	stream.writeIndention(0)
+}
+
+// WriteArrayStart write [ with possible indention
+func (stream *Stream) WriteArrayStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('[')
+	stream.writeIndention(0)
+}
+
+// WriteEmptyArray write []
+func (stream *Stream) WriteEmptyArray() {
+	stream.writeTwoBytes('[', ']')
+}
+
+// WriteArrayEnd write ] with possible indention
+func (stream *Stream) WriteArrayEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte(']')
+}
+
+func (stream *Stream) writeIndention(delta int) {
+	if stream.indention == 0 {
+		return
+	}
+	stream.writeByte('\n')
+	toWrite := stream.indention - delta
+	stream.ensure(toWrite)
+	for i := 0; i < toWrite && stream.n < len(stream.buf); i++ {
+		stream.buf[stream.n] = ' '
+		stream.n++
+	}
+}
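The next file adds the float writers, including a deliberately lossy fast path: WriteFloat32Lossy and WriteFloat64Lossy scale the value to six decimal places, round once in integer arithmetic, and trim trailing zeros instead of calling strconv. A small self-contained sketch of that fixed-point idea; lossyFloat is a hypothetical stand-in that builds a string, whereas the vendored code writes bytes into its own buffer.

package main

import "fmt"

// lossyFloat mimics the lossy writers below: scale to six decimal places,
// round once in integer arithmetic, then trim trailing zeros.
func lossyFloat(val float64) string {
	neg := ""
	if val < 0 {
		neg, val = "-", -val
	}
	const exp = 1000000 // 10^6: six digits of fractional precision
	lval := uint64(val*exp + 0.5)
	s := fmt.Sprintf("%s%d", neg, lval/exp)
	if frac := lval % exp; frac != 0 {
		f := fmt.Sprintf("%06d", frac) // keep leading zeros of the fraction
		for f[len(f)-1] == '0' {
			f = f[:len(f)-1]
		}
		s += "." + f
	}
	return s
}

func main() {
	fmt.Println(lossyFloat(3.14))  // 3.14
	fmt.Println(lossyFloat(-0.5))  // -0.5
	fmt.Println(lossyFloat(12345)) // 12345
}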
diff --git a/vendor/github.com/json-iterator/go/feature_stream_float.go b/vendor/github.com/json-iterator/go/feature_stream_float.go
new file mode 100644
index 000000000000..9a404e11d4a6
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_stream_float.go
@@ -0,0 +1,96 @@
+package jsoniter
+
+import (
+	"math"
+	"strconv"
+)
+
+var pow10 []uint64
+
+func init() {
+	pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
+}
+
+// WriteFloat32 write float32 to stream
+func (stream *Stream) WriteFloat32(val float32) {
+	abs := math.Abs(float64(val))
+	fmt := byte('f')
+	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+	if abs != 0 {
+		if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32))
+}
+
+// WriteFloat32Lossy write float32 to stream with only 6 digits of precision, but much faster
+func (stream *Stream) WriteFloat32Lossy(val float32) {
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat32(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 6
+	lval := uint64(float64(val)*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	stream.ensure(10)
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[stream.n-1] == '0' {
+		stream.n--
+	}
+}
+
+// WriteFloat64 write float64 to stream
+func (stream *Stream) WriteFloat64(val float64) {
+	abs := math.Abs(val)
+	fmt := byte('f')
+	// Note: Must use float64 comparisons for the underlying float64 value to get precise cutoffs right.
+	if abs != 0 {
+		if abs < 1e-6 || abs >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64))
+}
+
+// WriteFloat64Lossy write float64 to stream with only 6 digits of precision, but much faster
+func (stream *Stream) WriteFloat64Lossy(val float64) {
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat64(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 6
+	lval := uint64(val*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	stream.ensure(10)
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[stream.n-1] == '0' {
+		stream.n--
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/feature_stream_int.go b/vendor/github.com/json-iterator/go/feature_stream_int.go
new file mode 100644
index 000000000000..7cfd522c1064
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_stream_int.go
@@ -0,0 +1,320 @@
+package jsoniter
+
+var digits []uint32
+
+func init() {
+	digits = make([]uint32, 1000)
+	for i := uint32(0); i < 1000; i++ {
+		digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
+		if i < 10 {
+			digits[i] += 2 << 24
+		} else if i < 100 {
+			digits[i] += 1 << 24
+		}
+	}
+}
+
+func writeFirstBuf(buf []byte, v uint32, n int) int {
+	start := v >> 24
+	if start == 0 {
+		buf[n] = byte(v >> 16)
+		n++
+		buf[n] = byte(v >> 8)
+		n++
+	} else if start == 1 {
+		buf[n] = byte(v >> 8)
+		n++
+	}
+	buf[n] = byte(v)
+	n++
+	return n
+}
+
+func writeBuf(buf []byte, v uint32, n int) {
+	buf[n] = byte(v >> 16)
+	buf[n+1] = byte(v >> 8)
+	buf[n+2] = byte(v)
+}
+
+// WriteUint8 write uint8 to stream
+func (stream *Stream)
WriteUint8(val uint8) { + stream.ensure(3) + stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + stream.ensure(4) + n := stream.n + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint8(nval) + } + stream.n = writeFirstBuf(stream.buf, digits[val], n) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + stream.ensure(5) + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) + return + } + r1 := val - q1*1000 + n := writeFirstBuf(stream.buf, digits[q1], stream.n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + stream.ensure(6) + n := stream.n + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint16(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + n = writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + stream.ensure(10) + n := stream.n + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + } else { + r3 := q2 - q3*1000 + stream.buf[n] = byte(q3 + '0') + n++ + writeBuf(stream.buf, digits[r3], n) + n += 3 + } + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 +} + +// WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + stream.ensure(11) + n := stream.n + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint32(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + } else { + r3 := q2 - q3*1000 + stream.buf[n] = byte(q3 + '0') + n++ + writeBuf(stream.buf, digits[r3], n) + n += 3 + } + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + stream.ensure(20) + n := stream.n + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + n = writeFirstBuf(stream.buf, digits[q3], n) + writeBuf(stream.buf, 
digits[r3], n) + writeBuf(stream.buf, digits[r2], n+3) + writeBuf(stream.buf, digits[r1], n+6) + stream.n = n + 9 + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + n = writeFirstBuf(stream.buf, digits[q4], n) + writeBuf(stream.buf, digits[r4], n) + writeBuf(stream.buf, digits[r3], n+3) + writeBuf(stream.buf, digits[r2], n+6) + writeBuf(stream.buf, digits[r1], n+9) + stream.n = n + 12 + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + n = writeFirstBuf(stream.buf, digits[q5], n) + } else { + n = writeFirstBuf(stream.buf, digits[q6], n) + r6 := q5 - q6*1000 + writeBuf(stream.buf, digits[r6], n) + n += 3 + } + writeBuf(stream.buf, digits[r5], n) + writeBuf(stream.buf, digits[r4], n+3) + writeBuf(stream.buf, digits[r3], n+6) + writeBuf(stream.buf, digits[r2], n+9) + writeBuf(stream.buf, digits[r1], n+12) + stream.n = n + 15 +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + stream.ensure(20) + n := stream.n + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint64(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + n = writeFirstBuf(stream.buf, digits[q3], n) + writeBuf(stream.buf, digits[r3], n) + writeBuf(stream.buf, digits[r2], n+3) + writeBuf(stream.buf, digits[r1], n+6) + stream.n = n + 9 + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + n = writeFirstBuf(stream.buf, digits[q4], n) + writeBuf(stream.buf, digits[r4], n) + writeBuf(stream.buf, digits[r3], n+3) + writeBuf(stream.buf, digits[r2], n+6) + writeBuf(stream.buf, digits[r1], n+9) + stream.n = n + 12 + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + n = writeFirstBuf(stream.buf, digits[q5], n) + } else { + stream.buf[n] = byte(q6 + '0') + n++ + r6 := q5 - q6*1000 + writeBuf(stream.buf, digits[r6], n) + n += 3 + } + writeBuf(stream.buf, digits[r5], n) + writeBuf(stream.buf, digits[r4], n+3) + writeBuf(stream.buf, digits[r3], n+6) + writeBuf(stream.buf, digits[r2], n+9) + writeBuf(stream.buf, digits[r1], n+12) + stream.n = n + 15 +} + +// WriteInt write int to stream +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/feature_stream_string.go b/vendor/github.com/json-iterator/go/feature_stream_string.go new file mode 100644 index 000000000000..334282f05fa6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream_string.go @@ -0,0 +1,396 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML